shamik
committed on
feat: adding api key usage instead of using the default api key.
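In outline, the commit swaps the Space's hard-coded billing account for a key the user supplies at runtime: the Gradio UI collects a Hugging Face token, calls huggingface_hub.login() with it, and the inference clients then authenticate with that token instead of bill_to="VitalNest". Below is a minimal sketch of the pattern, assuming login() stores the token where later clients pick it up by default; the helper name use_user_key is invented for illustration, the real wiring is in the diffs that follow.

    from huggingface_hub import InferenceClient, login

    def use_user_key(api_key: str) -> InferenceClient:
        # Hugging Face user tokens start with "hf_"; reject anything else early.
        if not api_key or not api_key.startswith("hf"):
            raise ValueError("Incorrect HuggingFace Inference API Key")
        # login() stores the token locally, so clients created afterwards without
        # an explicit token (and without bill_to=...) run against the user's own
        # account rather than the VitalNest billing account.
        login(token=api_key)
        return InferenceClient(provider="hyperbolic")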
src/insurance_assistants/agents.py
CHANGED
@@ -1,8 +1,7 @@
+# from dotenv import find_dotenv, load_dotenv
 from pathlib import Path
 
-
-import os
-from huggingface_hub import InferenceClient, login
+from huggingface_hub import InferenceClient  # , login
 from smolagents import (
     CodeAgent,
     DuckDuckGoSearchTool,
@@ -24,6 +23,7 @@ rag_app = RAG()
 rag_app.vectordb_id = "policy_wordings"
 # login(os.getenv("HF_TOKEN"))
 
+
 class InsuranceInfoRetriever(Tool):
     name = "InsuranceInfoRetriever"
     description = "Retrieves information from insurance documents."
@@ -35,7 +35,7 @@ class InsuranceInfoRetriever(Tool):
     def forward(self, query: str) -> str:
         client = InferenceClient(
             provider="hyperbolic",
-            bill_to="VitalNest",
+            # bill_to="VitalNest",
             # token=os.getenv("HF_TOKEN")
         )
         results = rag_app.search_documents(query)
@@ -73,9 +73,11 @@ class InsuranceInfoRetriever(Tool):
 
 insurance_agent = CodeAgent(
     tools=[InsuranceInfoRetriever(), FinalAnswerTool()],
-    model=InferenceClientModel(
-        …
-        …
+    model=InferenceClientModel(
+        # bill_to="VitalNest",
+        temperature=0.1,
+        # token=os.getenv("HF_TOKEN")
+    ),
     additional_authorized_imports=["os", "requests", "bs4", "pil", "base64", "io"],
     max_steps=1,
     verbosity_level=-1,
@@ -85,7 +87,9 @@ insurance_agent = CodeAgent(
 )
 websearch_agent = ToolCallingAgent(
     model=InferenceClientModel(
-        model_id="Qwen/Qwen3-30B-A3B",
+        model_id="Qwen/Qwen3-30B-A3B",
+        # bill_to="VitalNest",
+        temperature=0.1,
         # token=os.getenv("HF_TOKEN")
     ),
     tools=[
@@ -102,7 +106,8 @@ websearch_agent = ToolCallingAgent(
 
 wikipedia_agent = ToolCallingAgent(
     model=InferenceClientModel(
-        model_id="Qwen/Qwen3-30B-A3B",
+        model_id="Qwen/Qwen3-30B-A3B",
+        # bill_to="VitalNest", temperature=0.1,
         # token=os.getenv("HF_TOKEN")
     ),
     tools=[
@@ -120,7 +125,7 @@ manager_agent = CodeAgent(
     additional_authorized_imports=["os"],
     model=InferenceClientModel(
         model_id="Qwen/Qwen3-235B-A22B",
-        bill_to="VitalNest",
+        # bill_to="VitalNest",
         temperature=0.1,
         # token=os.getenv("HF_TOKEN")
     ),
@@ -134,4 +139,3 @@ manager_agent = CodeAgent(
     "health insurance documents using the `insurance_agent` team member, search wikipedia and the web for general information.",
 )
 manager_agent.system_prompt = manager_agent.system_prompt + PROMPT_PREFIX
-
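Net effect in agents.py: every InferenceClient / InferenceClientModel loses its bill_to="VitalNest" argument (kept only as a comment), and no token is passed explicitly, so the clients fall back to whatever Hugging Face credentials are ambient, i.e. the token stored by the login(token=api_key) call added in ui.py below, or an HF_TOKEN environment variable. A small sketch of the two variants; the explicit-token line mirrors the commented-out option in the diff and is not active code.

    import os

    from huggingface_hub import InferenceClient
    from smolagents import InferenceClientModel

    # Explicit credentials (the commented-out variant kept in the diff):
    client = InferenceClient(provider="hyperbolic", token=os.getenv("HF_TOKEN"))

    # Implicit credentials (what the code now does): no token/bill_to arguments,
    # so the client uses the token saved by huggingface_hub.login() at runtime.
    client = InferenceClient(provider="hyperbolic")
    model = InferenceClientModel(model_id="Qwen/Qwen3-30B-A3B", temperature=0.1)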
src/insurance_assistants/complex_rag.py
CHANGED
@@ -16,6 +16,7 @@ from colpali_engine.models import (
     ColQwen2_5_Processor,
 )
 from colpali_engine.utils.torch_utils import ListDataset, get_torch_device
+
 # from dotenv import find_dotenv, load_dotenv
 # from openai import OpenAI
 from pdf2image import convert_from_path
src/insurance_assistants/ui.py
CHANGED
@@ -5,16 +5,19 @@ import re
 import shutil
 
 import gradio as gr
-from dotenv import load_dotenv
 from gradio_pdf import PDF
+from huggingface_hub import login
 from smolagents.gradio_ui import _process_action_step, _process_final_answer_step
 from smolagents.memory import ActionStep, FinalAnswerStep, MemoryStep, PlanningStep
 from smolagents.models import ChatMessageStreamDelta
-from huggingface_hub import login
 
 # from smolagents import CodeAgent, InferenceClientModel
 from src.insurance_assistants.agents import manager_agent
-from src.insurance_assistants.consts import …
+from src.insurance_assistants.consts import (
+    PRIMARY_HEADING,
+    PROJECT_ROOT_DIR,
+    PROMPT_PREFIX,
+)
 
 # load_dotenv(override=True)
 
@@ -90,11 +93,15 @@ class UI:
     def interact_with_agent(self, prompt, messages, session_state, api_key):
         # Get or create session-specific agent
        if not api_key or not api_key.startswith("hf"):
-            raise ValueError("Incorrect API …
+            raise ValueError("Incorrect HuggingFace Inference API Key")
+        # Login to Hugging Face with the provided API key
+        login(token=api_key)
         if "agent" not in session_state:
             # session_state["agent"] = CodeAgent(tools=[], model=InfenceClientModel())
             session_state["agent"] = manager_agent
-            session_state["agent"].system_prompt = …
+            session_state["agent"].system_prompt = (
+                session_state["agent"].system_prompt + PROMPT_PREFIX
+            )
 
         # Adding monitoring
         try:
@@ -223,9 +230,12 @@ class UI:
             """
         )
         with gr.Group():
-            api_key = gr.Textbox(
-                …
-                …
+            api_key = gr.Textbox(
+                placeholder="Enter your HuggingFace Inference API KEY HERE",
+                label="🤗 Inference API Key",
+                show_label=True,
+                type="password",
+            )
             gr.Markdown(
                 value="**Your question, please...**", container=True
             )
@@ -342,6 +352,8 @@ class UI:
         demo.queue(max_size=4).launch(debug=False, **kwargs)
 
 
-if __name__=="__main__":
-    …
-    …
+# if __name__ == "__main__":
+#     UI().launch(
+#         share=True,
+#         allowed_paths=[(PROJECT_ROOT_DIR / "data/policy_wordings").as_posix()],
+#     )