Testing HF InferenceClient
app.py
CHANGED
@@ -178,6 +178,11 @@ if __name__ == "__main__":
         print("ℹ️ SPACE_ID environment variable not found (running locally?). Repo URL cannot be determined.")
 
     print("-"*(60 + len(" App Starting ")) + "\n")
+    print("\n" + "*"*30 + " Debug area " + "*"*30)
+    from utils import callHfInferenceClientLLM
+    response = callHfInferenceClientLLM("What is the capital of France?")
+    print(response)
+    print("\n" + "*"*30 + " Debug area " + "*"*30)
 
     print("Launching Gradio Interface for Basic Agent Evaluation...")
     demo.launch(debug=True, share=False)
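The block added above runs one test completion through callHfInferenceClientLLM before demo.launch(), so any error from the Hugging Face call (missing token, unavailable model) would stop the Space during startup. A minimal sketch of a more defensive variant of the same debug block, assuming the utils helper added in this commit, could guard the call:

# Sketch only: same smoke test as the committed debug block, but guarded so the
# Gradio app still launches if the inference call fails (e.g. no HF token configured).
print("\n" + "*"*30 + " Debug area " + "*"*30)
try:
    from utils import callHfInferenceClientLLM
    print(callHfInferenceClientLLM("What is the capital of France?"))
except Exception as err:
    print(f"Debug call failed: {err}")
print("\n" + "*"*30 + " Debug area " + "*"*30)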
config.py
CHANGED
@@ -3,9 +3,7 @@ baseApiUrl = "https://agents-course-unit4-scoring.hf.space"
 questionsUrl = f"{baseApiUrl}/questions"
 submitUrl = f"{baseApiUrl}/submit"
 
-
-localModel = "llama3.2"
-hfMoldel = "(tbd)"
+hfMoldel = "meta-llama/Llama-3.3-70B-Instruct"
 
 logLevel = 'DEBUG'
 logFile = 'log.txt'
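config.py now points hfMoldel (the spelling used throughout the repo) at a concrete model id instead of the "(tbd)" placeholder, and drops the localModel entry. If the model ever needs to change without editing the file, one option is reading an override from the environment; the HF_MODEL variable below is purely illustrative, not something the Space currently sets:

import os

# Hypothetical override: fall back to the model id pinned in this commit.
hfMoldel = os.environ.get("HF_MODEL", "meta-llama/Llama-3.3-70B-Instruct")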
utils.py
CHANGED
@@ -1,19 +1,19 @@
 
-import config
-if config.runLocal:
-    from ollama import chat as OllamaChat
+import os
 
+from huggingface_hub import InferenceClient
 from langchain_community.tools import DuckDuckGoSearchRun
+import config
 
 
 def callWebSearch(query):
     return DuckDuckGo(query)
 
 def callLLM(query):
-    if config.runLocal:
+    if "LOCALLLM" in os.environ:
         return callLocalLLM(query)
     else:
-        return
+        return callHfInferenceClientLLM(query)
 
 def DuckDuckGo(query):
     search_tool = DuckDuckGoSearchRun()
@@ -21,12 +21,22 @@ def DuckDuckGo(query):
     return results
 
 def callLocalLLM(query):
-    response = OllamaChat(model=
+    response = OllamaChat(model=os.environ["LOCALLLM"], messages=[ { 'role': 'user', 'content': query } ])
     return response['message']['content']
 
-def
-
+def callHfInferenceClientLLM(query):
+    client = InferenceClient(config.hfMoldel)
+    response = client.chat.completions.create(
+        messages = [ {"role": "user", "content": query } ],
+        stream=False, max_tokens=1024 )
+    return response.choices[0].message.content
 
 if __name__ == "__main__":
+    os.environ["LOCALLLM"] = "llama3.2"
+    from ollama import chat as OllamaChat
+    response = callLLM("What is the capital of France?")
+    print(response)
     response = callWebSearch("who is the president of France")
+    print(response)
+    response = callHfInferenceClientLLM("What is the capital of France?")
     print(response)
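callHfInferenceClientLLM builds on huggingface_hub's InferenceClient and its OpenAI-style chat.completions.create API. Two practical notes: meta-llama/Llama-3.3-70B-Instruct is a gated model, so the client needs a valid access token (InferenceClient picks one up from the HF_TOKEN environment variable, or it can be passed explicitly), and OllamaChat is only imported inside the __main__ block, so callLocalLLM as committed works only when utils.py is run directly. A standalone sketch of the same remote call with the token made explicit (HF_TOKEN here is just the conventional variable name, not something this commit sets):

import os
from huggingface_hub import InferenceClient

# Same chat-completion call as callHfInferenceClientLLM, with the token passed
# explicitly. Assumes the Llama 3.3 license has been accepted on the Hub.
client = InferenceClient(
    "meta-llama/Llama-3.3-70B-Instruct",
    token=os.environ.get("HF_TOKEN"),
)
response = client.chat.completions.create(
    messages=[{"role": "user", "content": "What is the capital of France?"}],
    max_tokens=1024,
    stream=False,
)
print(response.choices[0].message.content)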