naman1102 committed on
Commit
34292b8
·
1 Parent(s): 75ef2fd
Files changed (2) hide show
  1. app.py +19 -18
  2. requirements.txt +2 -1
app.py CHANGED
@@ -5,13 +5,13 @@ import inspect
5
  import pandas as pd
6
  import ast
7
  import operator
 
8
  from typing import List, Dict, Any, Optional, Annotated
9
  from langgraph.graph import Graph, StateGraph
10
  from langgraph.prebuilt import ToolNode
11
  from tools import simple_search
12
- from huggingface_hub import InferenceClient
13
  from typing_extensions import TypedDict
14
- import time
15
 
16
  def override(_, new): return new
17
 
@@ -19,7 +19,7 @@ print("trial")
19
  # (Keep Constants as is)
20
  # --- Constants ---
21
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
22
- HF_TOKEN = os.getenv("HF_TOKEN") # Make sure to set this environment variable
23
 
24
 
25
  class AgentState(TypedDict):
@@ -33,16 +33,12 @@ class AgentState(TypedDict):
33
 
34
  class BasicAgent:
35
  def __init__(self):
36
- print("Initializing BasicAgent with Qwen2.5-Coder-7B-Instruct...")
37
- if not HF_TOKEN:
38
- raise ValueError("HF_TOKEN environment variable not set. Please set your Hugging Face API token.")
39
 
40
- # Initialize LLM client
41
- self.llm = InferenceClient(
42
- # model="Qwen/Qwen2.5-Coder-7B-Instruct",
43
- model = "mistralai/Mistral-7B-Instruct-v0.2",
44
- token=HF_TOKEN
45
- )
46
 
47
  # Create the agent workflow
48
  print("Creating workflow variable")
@@ -54,16 +50,21 @@ class BasicAgent:
54
  try:
55
  print("=== Sending prompt ===")
56
  print(prompt[:500])
57
- response = self.llm.text_generation(
58
- prompt,
59
- max_new_tokens=200,
 
 
 
 
60
  temperature=0.7,
61
  top_p=0.95,
62
- repetition_penalty=1.1
63
  )
64
  print("=== Received response ===")
65
- print(response)
66
- return response
 
67
  except Exception as e:
68
  print(f"Error calling LLM API: {e}")
69
  return f"Error getting response from LLM: {str(e)}"
 
5
  import pandas as pd
6
  import ast
7
  import operator
8
+ import time
9
  from typing import List, Dict, Any, Optional, Annotated
10
  from langgraph.graph import Graph, StateGraph
11
  from langgraph.prebuilt import ToolNode
12
  from tools import simple_search
13
+ from openai import OpenAI
14
  from typing_extensions import TypedDict
 
15
 
16
  def override(_, new): return new
17
 
 
19
  # (Keep Constants as is)
20
  # --- Constants ---
21
  DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
22
+ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") # Make sure to set this environment variable
23
 
24
 
25
  class AgentState(TypedDict):
 
33
 
34
  class BasicAgent:
35
  def __init__(self):
36
+ print("Initializing BasicAgent with OpenAI...")
37
+ if not OPENAI_API_KEY:
38
+ raise ValueError("OPENAI_API_KEY environment variable not set. Please set your OpenAI API key.")
39
 
40
+ # Initialize OpenAI client
41
+ self.llm = OpenAI(api_key=OPENAI_API_KEY)
 
 
 
 
42
 
43
  # Create the agent workflow
44
  print("Creating workflow variable")
 
50
  try:
51
  print("=== Sending prompt ===")
52
  print(prompt[:500])
53
+ response = self.llm.chat.completions.create(
54
+ model="gpt-3.5-turbo",
55
+ messages=[
56
+ {"role": "system", "content": "You are a helpful AI assistant that provides clear and concise answers."},
57
+ {"role": "user", "content": prompt}
58
+ ],
59
+ max_tokens=200,
60
  temperature=0.7,
61
  top_p=0.95,
62
+ frequency_penalty=0.1
63
  )
64
  print("=== Received response ===")
65
+ response_text = response.choices[0].message.content
66
+ print(response_text)
67
+ return response_text
68
  except Exception as e:
69
  print(f"Error calling LLM API: {e}")
70
  return f"Error getting response from LLM: {str(e)}"
requirements.txt CHANGED
@@ -5,4 +5,5 @@ pydantic
5
  duckduckgo-search
6
  langchain
7
  langchain-community
8
- huggingface-hub
 
 
5
  duckduckgo-search
6
  langchain
7
  langchain-community
8
+ huggingface-hub
9
+ openai