Update agent.py
agent.py CHANGED

@@ -15,7 +15,6 @@ from llama_index.tools.arxiv import ArxivToolSpec
 import duckduckgo_search as ddg
 import re
 from llama_index.core.agent.workflow import ReActAgent
-from llama_index.llms.openrouter import OpenRouter
 import wandb
 from llama_index.callbacks.wandb import WandbCallbackHandler
 from llama_index.core.callbacks.base import CallbackManager
@@ -23,21 +22,13 @@ from llama_index.core.callbacks.llama_debug import LlamaDebugHandler
 from llama_index.core import Settings
 
 from transformers import AutoModelForCausalLM, AutoTokenizer
-from llama_index.llms.huggingface import HuggingFaceLLM
+from llama_index.llms.vllm import Vllm
 
-
-
-
-
-
-    device_map="auto"  # will use all available GPUs
-)
-
-proj_llm = HuggingFaceLLM(
-    model=model,
-    tokenizer=tokenizer,
-    device_map="auto",  # ensures multi-GPU support
-    generate_kwargs={"temperature": 0.7, "top_p": 0.95}
+llm = Vllm(
+    model="mistralai/Pixtral-12B-2409",
+    tensor_parallel_size=2,  # For two GPUs
+    max_new_tokens=512,
+    vllm_kwargs={"swap_space": 1, "gpu_memory_utilization": 0.9},
 )
 
 embed_model = HuggingFaceEmbedding("BAAI/bge-small-en-v1.5")
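For readers wiring this up, below is a minimal sketch of how the new Vllm instance could plug into the rest of agent.py. It is an illustration under stated assumptions: the Settings registration and the ReActAgent construction are inferred from the imports visible in this diff, and the empty tool list is a placeholder, not code from the commit.

# Sketch: one plausible way the new Vllm LLM could be used downstream
# in agent.py. The Settings wiring and the agent construction below are
# assumptions for illustration, not part of the commit.
from llama_index.core import Settings
from llama_index.core.agent.workflow import ReActAgent
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.vllm import Vllm

llm = Vllm(
    model="mistralai/Pixtral-12B-2409",
    tensor_parallel_size=2,  # shard the model weights across two GPUs
    max_new_tokens=512,
    vllm_kwargs={"swap_space": 1, "gpu_memory_utilization": 0.9},
)
embed_model = HuggingFaceEmbedding("BAAI/bge-small-en-v1.5")

# Register both models globally so LlamaIndex components default to them.
Settings.llm = llm
Settings.embed_model = embed_model

# Hypothetical agent setup; the actual tool list lives elsewhere in agent.py.
agent = ReActAgent(tools=[], llm=llm)

On the vLLM side, tensor_parallel_size=2 splits the weights across both GPUs, gpu_memory_utilization=0.9 lets vLLM claim 90% of each card's memory for weights and KV cache, and swap_space=1 sets aside 1 GiB of CPU RAM per GPU for swapping out preempted sequences.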