cboettig committed
Commit bb9c2bf · 1 Parent(s): 2a42dca

local gpu use

Files changed (2)
  1. gpu-requirements.txt +1 -0
  2. vllm-tutorial.ipynb +641 -0
gpu-requirements.txt ADDED
@@ -0,0 +1 @@
1
+ vllm
vllm-tutorial.ipynb ADDED
@@ -0,0 +1,641 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 10,
6
+ "id": "405bc169-e0b7-48e6-84b8-4e4a791cf61a",
7
+ "metadata": {
8
+ "scrolled": true
9
+ },
10
+ "outputs": [
11
+ {
12
+ "name": "stdout",
13
+ "output_type": "stream",
14
+ "text": [
15
+ "INFO 06-07 04:15:58 [__init__.py:243] Automatically detected platform cuda.\n",
16
+ "INFO 06-07 04:16:02 [__init__.py:31] Available plugins for group vllm.general_plugins:\n",
17
+ "INFO 06-07 04:16:02 [__init__.py:33] - lora_filesystem_resolver -> vllm.plugins.lora_resolvers.filesystem_resolver:register_filesystem_resolver\n",
18
+ "INFO 06-07 04:16:02 [__init__.py:36] All plugins in this group will be loaded. Set `VLLM_PLUGINS` to control which plugins to load.\n",
19
+ "INFO 06-07 04:16:03 [api_server.py:1289] vLLM API server version 0.9.0.1\n",
20
+ "INFO 06-07 04:16:03 [cli_args.py:300] non-default args: {'host': '0.0.0.0', 'task': 'embed', 'trust_remote_code': True, 'enforce_eager': True, 'tensor_parallel_size': 2, 'gpu_memory_utilization': 0.4}\n",
21
+ "WARNING 06-07 04:16:14 [config.py:907] awq quantization is not fully optimized yet. The speed can be slower than non-quantized models.\n",
22
+ "WARNING 06-07 04:16:14 [arg_utils.py:1583] Compute Capability < 8.0 is not supported by the V1 Engine. Falling back to V0. \n",
23
+ "WARNING 06-07 04:16:14 [arg_utils.py:1431] The model has a long context length (40960). This may causeOOM during the initial memory profiling phase, or result in low performance due to small KV cache size. Consider setting --max-model-len to a smaller value.\n",
24
+ "INFO 06-07 04:16:14 [config.py:1875] Defaulting to use mp for distributed inference\n",
25
+ "WARNING 06-07 04:16:14 [cuda.py:87] To see benefits of async output processing, enable CUDA graph. Since, enforce-eager is enabled, async output processor cannot be used\n",
26
+ "INFO 06-07 04:16:14 [api_server.py:257] Started engine process with PID 13896\n",
27
+ "INFO 06-07 04:16:18 [__init__.py:243] Automatically detected platform cuda.\n",
28
+ "INFO 06-07 04:16:21 [__init__.py:31] Available plugins for group vllm.general_plugins:\n",
29
+ "INFO 06-07 04:16:21 [__init__.py:33] - lora_filesystem_resolver -> vllm.plugins.lora_resolvers.filesystem_resolver:register_filesystem_resolver\n",
30
+ "INFO 06-07 04:16:21 [__init__.py:36] All plugins in this group will be loaded. Set `VLLM_PLUGINS` to control which plugins to load.\n",
31
+ "INFO 06-07 04:16:21 [llm_engine.py:230] Initializing a V0 LLM engine (v0.9.0.1) with config: model='Qwen/Qwen3-32B-AWQ', speculative_config=None, tokenizer='Qwen/Qwen3-32B-AWQ', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config={}, tokenizer_revision=None, trust_remote_code=True, dtype=torch.float16, max_seq_len=40960, download_dir=None, load_format=LoadFormat.AUTO, tensor_parallel_size=2, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=awq, enforce_eager=True, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(backend='auto', disable_fallback=False, disable_any_whitespace=False, disable_additional_properties=False, reasoning_backend=''), observability_config=ObservabilityConfig(show_hidden_metrics_for_version=None, otlp_traces_endpoint=None, collect_detailed_traces=None), seed=0, served_model_name=Qwen/Qwen3-32B-AWQ, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=None, chunked_prefill_enabled=False, use_async_output_proc=False, pooler_config=PoolerConfig(pooling_type=None, normalize=None, softmax=None, step_tag_id=None, returned_token_ids=None), compilation_config={\"compile_sizes\": [], \"inductor_compile_config\": {\"enable_auto_functionalized_v2\": false}, \"cudagraph_capture_sizes\": [], \"max_capture_size\": 0}, use_cached_outputs=True, \n",
32
+ "WARNING 06-07 04:16:22 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 64 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.\n",
33
+ "INFO 06-07 04:16:22 [cuda.py:240] Cannot use FlashAttention-2 backend for Volta and Turing GPUs.\n",
34
+ "INFO 06-07 04:16:22 [cuda.py:289] Using XFormers backend.\n",
35
+ "INFO 06-07 04:16:27 [__init__.py:243] Automatically detected platform cuda.\n",
36
+ "\u001b[1;36m(VllmWorkerProcess pid=14061)\u001b[0;0m INFO 06-07 04:16:30 [multiproc_worker_utils.py:225] Worker ready; awaiting tasks\n",
37
+ "\u001b[1;36m(VllmWorkerProcess pid=14061)\u001b[0;0m INFO 06-07 04:16:30 [__init__.py:31] Available plugins for group vllm.general_plugins:\n",
38
+ "\u001b[1;36m(VllmWorkerProcess pid=14061)\u001b[0;0m INFO 06-07 04:16:30 [__init__.py:33] - lora_filesystem_resolver -> vllm.plugins.lora_resolvers.filesystem_resolver:register_filesystem_resolver\n",
39
+ "\u001b[1;36m(VllmWorkerProcess pid=14061)\u001b[0;0m INFO 06-07 04:16:30 [__init__.py:36] All plugins in this group will be loaded. Set `VLLM_PLUGINS` to control which plugins to load.\n",
40
+ "\u001b[1;36m(VllmWorkerProcess pid=14061)\u001b[0;0m INFO 06-07 04:16:30 [cuda.py:240] Cannot use FlashAttention-2 backend for Volta and Turing GPUs.\n",
41
+ "\u001b[1;36m(VllmWorkerProcess pid=14061)\u001b[0;0m INFO 06-07 04:16:30 [cuda.py:289] Using XFormers backend.\n",
42
+ "INFO 06-07 04:16:31 [utils.py:1077] Found nccl from library libnccl.so.2\n",
43
+ "INFO 06-07 04:16:31 [pynccl.py:69] vLLM is using nccl==2.26.2\n",
44
+ "\u001b[1;36m(VllmWorkerProcess pid=14061)\u001b[0;0m INFO 06-07 04:16:31 [utils.py:1077] Found nccl from library libnccl.so.2\n",
45
+ "\u001b[1;36m(VllmWorkerProcess pid=14061)\u001b[0;0m INFO 06-07 04:16:31 [pynccl.py:69] vLLM is using nccl==2.26.2\n",
46
+ "\u001b[1;36m(VllmWorkerProcess pid=14061)\u001b[0;0m INFO 06-07 04:16:31 [custom_all_reduce_utils.py:245] reading GPU P2P access cache from /home/jovyan/.cache/vllm/gpu_p2p_access_cache_for_0,1.json\n",
47
+ "\u001b[1;36m(VllmWorkerProcess pid=14061)\u001b[0;0m WARNING 06-07 04:16:31 [custom_all_reduce.py:146] Custom allreduce is disabled because your platform lacks GPU P2P capability or P2P test failed. To silence this warning, specify disable_custom_all_reduce=True explicitly.\n",
48
+ "INFO 06-07 04:16:31 [custom_all_reduce_utils.py:245] reading GPU P2P access cache from /home/jovyan/.cache/vllm/gpu_p2p_access_cache_for_0,1.json\n",
49
+ "WARNING 06-07 04:16:31 [custom_all_reduce.py:146] Custom allreduce is disabled because your platform lacks GPU P2P capability or P2P test failed. To silence this warning, specify disable_custom_all_reduce=True explicitly.\n",
50
+ "INFO 06-07 04:16:31 [shm_broadcast.py:250] vLLM message queue communication handle: Handle(local_reader_ranks=[1], buffer_handle=(1, 4194304, 6, 'psm_8808788c'), local_subscribe_addr='ipc:///tmp/f2f3507a-b619-4382-897c-4059a5a27e80', remote_subscribe_addr=None, remote_addr_ipv6=False)\n",
51
+ "\u001b[1;36m(VllmWorkerProcess pid=14061)\u001b[0;0m INFO 06-07 04:16:31 [parallel_state.py:1064] rank 1 in world size 2 is assigned as DP rank 0, PP rank 0, TP rank 1, EP rank 1\n",
52
+ "INFO 06-07 04:16:31 [parallel_state.py:1064] rank 0 in world size 2 is assigned as DP rank 0, PP rank 0, TP rank 0, EP rank 0\n",
53
+ "INFO 06-07 04:16:31 [model_runner.py:1170] Starting to load model Qwen/Qwen3-32B-AWQ...\n",
54
+ "\u001b[1;36m(VllmWorkerProcess pid=14061)\u001b[0;0m INFO 06-07 04:16:31 [model_runner.py:1170] Starting to load model Qwen/Qwen3-32B-AWQ...\n",
55
+ "\u001b[1;36m(VllmWorkerProcess pid=14061)\u001b[0;0m INFO 06-07 04:16:32 [weight_utils.py:291] Using model weights format ['*.safetensors']\n",
56
+ "INFO 06-07 04:16:32 [weight_utils.py:291] Using model weights format ['*.safetensors']\n"
57
+ ]
58
+ },
59
+ {
60
+ "name": "stderr",
61
+ "output_type": "stream",
62
+ "text": [
63
+ "Loading safetensors checkpoint shards: 0% Completed | 0/4 [00:00<?, ?it/s]\n",
64
+ "Loading safetensors checkpoint shards: 25% Completed | 1/4 [00:04<00:13, 4.64s/it]\n",
65
+ "Loading safetensors checkpoint shards: 50% Completed | 2/4 [00:13<00:13, 6.86s/it]\n",
66
+ "Loading safetensors checkpoint shards: 75% Completed | 3/4 [00:21<00:07, 7.69s/it]\n",
67
+ "Loading safetensors checkpoint shards: 100% Completed | 4/4 [00:29<00:00, 7.84s/it]\n",
68
+ "Loading safetensors checkpoint shards: 100% Completed | 4/4 [00:29<00:00, 7.45s/it]\n",
69
+ "\n"
70
+ ]
71
+ },
72
+ {
73
+ "name": "stdout",
74
+ "output_type": "stream",
75
+ "text": [
76
+ "INFO 06-07 04:17:02 [default_loader.py:280] Loading weights took 29.98 seconds\n",
77
+ "\u001b[1;36m(VllmWorkerProcess pid=14061)\u001b[0;0m INFO 06-07 04:17:02 [default_loader.py:280] Loading weights took 30.20 seconds\n",
78
+ "INFO 06-07 04:17:02 [model_runner.py:1202] Model loading took 8.3324 GiB and 31.055884 seconds\n",
79
+ "\u001b[1;36m(VllmWorkerProcess pid=14061)\u001b[0;0m INFO 06-07 04:17:02 [model_runner.py:1202] Model loading took 8.3324 GiB and 31.096897 seconds\n",
80
+ "INFO 06-07 04:17:03 [api_server.py:1336] Starting vLLM API server on http://0.0.0.0:8000\n",
81
+ "INFO 06-07 04:17:03 [launcher.py:28] Available routes are:\n",
82
+ "INFO 06-07 04:17:03 [launcher.py:36] Route: /openapi.json, Methods: GET, HEAD\n",
83
+ "INFO 06-07 04:17:03 [launcher.py:36] Route: /docs, Methods: GET, HEAD\n",
84
+ "INFO 06-07 04:17:03 [launcher.py:36] Route: /docs/oauth2-redirect, Methods: GET, HEAD\n",
85
+ "INFO 06-07 04:17:03 [launcher.py:36] Route: /redoc, Methods: GET, HEAD\n",
86
+ "INFO 06-07 04:17:03 [launcher.py:36] Route: /health, Methods: GET\n",
87
+ "INFO 06-07 04:17:03 [launcher.py:36] Route: /load, Methods: GET\n",
88
+ "INFO 06-07 04:17:03 [launcher.py:36] Route: /ping, Methods: POST\n",
89
+ "INFO 06-07 04:17:03 [launcher.py:36] Route: /ping, Methods: GET\n",
90
+ "INFO 06-07 04:17:03 [launcher.py:36] Route: /tokenize, Methods: POST\n",
91
+ "INFO 06-07 04:17:03 [launcher.py:36] Route: /detokenize, Methods: POST\n",
92
+ "INFO 06-07 04:17:03 [launcher.py:36] Route: /v1/models, Methods: GET\n",
93
+ "INFO 06-07 04:17:03 [launcher.py:36] Route: /version, Methods: GET\n",
94
+ "INFO 06-07 04:17:03 [launcher.py:36] Route: /v1/chat/completions, Methods: POST\n",
95
+ "INFO 06-07 04:17:03 [launcher.py:36] Route: /v1/completions, Methods: POST\n",
96
+ "INFO 06-07 04:17:03 [launcher.py:36] Route: /v1/embeddings, Methods: POST\n",
97
+ "INFO 06-07 04:17:03 [launcher.py:36] Route: /pooling, Methods: POST\n",
98
+ "INFO 06-07 04:17:03 [launcher.py:36] Route: /classify, Methods: POST\n",
99
+ "INFO 06-07 04:17:03 [launcher.py:36] Route: /score, Methods: POST\n",
100
+ "INFO 06-07 04:17:03 [launcher.py:36] Route: /v1/score, Methods: POST\n",
101
+ "INFO 06-07 04:17:03 [launcher.py:36] Route: /v1/audio/transcriptions, Methods: POST\n",
102
+ "INFO 06-07 04:17:03 [launcher.py:36] Route: /rerank, Methods: POST\n",
103
+ "INFO 06-07 04:17:03 [launcher.py:36] Route: /v1/rerank, Methods: POST\n",
104
+ "INFO 06-07 04:17:03 [launcher.py:36] Route: /v2/rerank, Methods: POST\n",
105
+ "INFO 06-07 04:17:03 [launcher.py:36] Route: /invocations, Methods: POST\n",
106
+ "INFO 06-07 04:17:03 [launcher.py:36] Route: /metrics, Methods: GET\n"
107
+ ]
108
+ },
109
+ {
110
+ "name": "stderr",
111
+ "output_type": "stream",
112
+ "text": [
113
+ "INFO: Started server process [13556]\n",
114
+ "INFO: Waiting for application startup.\n",
115
+ "INFO: Application startup complete.\n"
116
+ ]
117
+ }
118
+ ],
119
+ "source": [
120
+ "import os\n",
121
+ "import subprocess\n",
122
+ "import threading\n",
123
+ "import time\n",
124
+ "\n",
125
+ "# Set environment variable we need to support dual-GPU on Cirrus\n",
126
+ "os.environ[\"NCCL_P2P_LEVEL\"] = \"NVL\"\n",
127
+ "\n",
128
+ "def run_vllm_server():\n",
129
+ " subprocess.run([\n",
130
+ " \"vllm\", \"serve\", \"Qwen/Qwen3-32B-AWQ\",\n",
131
+ " \"--host\", \"0.0.0.0\",\n",
132
+ " \"--port\", \"8000\",\n",
133
+ " \"--tensor-parallel-size\", \"2\",\n",
134
+ " \"--trust-remote-code\",\n",
135
+ " \"--gpu-memory-utilization\", \"0.4\",\n",
136
+ " \"--enforce-eager\",\n",
137
+ " \"--task\", \"embed\"\n",
138
+ " ])\n",
139
+ "\n",
140
+ "# Start server in daemon thread\n",
141
+ "server_thread = threading.Thread(target=run_vllm_server, daemon=True)\n",
142
+ "server_thread.start()\n",
143
+ "\n",
144
+ "## give server time to start up.\n",
145
+ "\n",
146
+ "import time\n",
147
+ "# Pause execution for 100 seconds\n",
148
+ "time.sleep(200)"
149
+ ]
150
+ },
151
+ {
152
+ "cell_type": "code",
153
+ "execution_count": null,
154
+ "id": "9a8397fa-6896-40a5-97d9-1d0c98797b35",
155
+ "metadata": {},
156
+ "outputs": [],
157
+ "source": [
158
+ "## wait for output above to print routes, ending with: \n",
159
+ "## INFO: Application startup complete.\n"
160
+ ]
161
+ },
162
+ {
163
+ "cell_type": "code",
164
+ "execution_count": 2,
165
+ "id": "24b64902-1305-43e7-9da8-e4d82d097cb5",
166
+ "metadata": {},
167
+ "outputs": [
168
+ {
169
+ "name": "stdout",
170
+ "output_type": "stream",
171
+ "text": [
172
+ "INFO 06-07 04:02:50 [logger.py:42] Received request embd-32a68fa8f24a4855b090f66f426e61c4-0: prompt: ' product down', params: PoolingParams(dimensions=None, additional_metadata=None), prompt_token_ids: [1985, 1495], prompt_embeds shape: None, lora_request: None, prompt_adapter_request: None.\n",
173
+ "INFO 06-07 04:02:50 [engine.py:316] Added request embd-32a68fa8f24a4855b090f66f426e61c4-0.\n",
174
+ "INFO 06-07 04:02:52 [metrics.py:486] Avg prompt throughput: 0.2 tokens/s, Avg generation throughput: 0.1 tokens/s, Running: 0 reqs, Swapped: 0 reqs, Pending: 0 reqs, GPU KV cache usage: 0.0%, CPU KV cache usage: 0.0%.\n",
175
+ "INFO: 127.0.0.1:37090 - \"POST /v1/embeddings HTTP/1.1\" 200 OK\n"
176
+ ]
177
+ }
178
+ ],
179
+ "source": [
180
+ "## NOTE! You must wait until the log above finishes and not just the cell.\n",
181
+ "## Connect to the local model\n",
182
+ "from langchain_openai import OpenAIEmbeddings\n",
183
+ "embedding = OpenAIEmbeddings(\n",
184
+ " model = \"Qwen/Qwen3-32B-AWQ\",\n",
185
+ " api_key = \"EMPTY\",\n",
186
+ " base_url = \"http://localhost:8000/v1\",\n",
187
+ ")\n",
188
+ "\n",
189
+ "## test that we can do embeddings\n",
190
+ "from langchain_core.vectorstores import InMemoryVectorStore\n",
191
+ "vectorstore = InMemoryVectorStore.from_texts([\"test text\"], embedding=embedding)"
192
+ ]
193
+ },
194
+ {
195
+ "cell_type": "code",
196
+ "execution_count": 4,
197
+ "id": "95ed10f3-5339-40cd-bf16-b0854f8b4b91",
198
+ "metadata": {},
199
+ "outputs": [],
200
+ "source": [
201
+ "import os\n",
202
+ "import requests\n",
203
+ "import zipfile\n",
204
+ "import pathlib\n",
205
+ "from langchain_community.document_loaders import PyPDFLoader\n",
206
+ "\n",
207
+ "def download_and_unzip(url, output_dir):\n",
208
+ " response = requests.get(url)\n",
209
+ " zip_file_path = os.path.basename(url)\n",
210
+ " with open(zip_file_path, 'wb') as f:\n",
211
+ " f.write(response.content)\n",
212
+ " with zipfile.ZipFile(zip_file_path, 'r') as zip_ref:\n",
213
+ " zip_ref.extractall(output_dir)\n",
214
+ " os.remove(zip_file_path)\n",
215
+ "\n",
216
+ "def pdf_loader(path):\n",
217
+ " all_documents = []\n",
218
+ " docs_dir = pathlib.Path(path)\n",
219
+ " for file in docs_dir.iterdir():\n",
220
+ " loader = PyPDFLoader(file)\n",
221
+ " documents = loader.load()\n",
222
+ " all_documents.extend(documents)\n",
223
+ " return all_documents\n",
224
+ "\n",
225
+ "\n",
226
+ "download_and_unzip(\"https://minio.carlboettiger.info/public-data/hwc.zip\", 'hwc')\n",
227
+ "docs = pdf_loader('hwc/')"
228
+ ]
229
+ },
230
+ {
231
+ "cell_type": "code",
232
+ "execution_count": 9,
233
+ "id": "c6e99791-8f34-4722-9708-665e409c26bd",
234
+ "metadata": {},
235
+ "outputs": [],
236
+ "source": [
237
+ "# Set up the Chat model from one of the NRP models\n",
238
+ "\n",
239
+ "import os\n",
240
+ "api_key = os.getenv(\"OPENAI_API_KEY\")\n",
241
+ "\n",
242
+ "# see `curl -H \"Authorization: Bearer $OPENAI_API_KEY\" https://llm.nrp-nautilus.io/v1/models`\n",
243
+ "models = {\"llama3\": \"llama3-sdsc\", \n",
244
+ " \"deepseek-small\": \"DeepSeek-R1-Distill-Qwen-32B\",\n",
245
+ " \"deepseek\": \"deepseek-r1-qwen-qualcomm\",\n",
246
+ " \"gemma3\": \"gemma3\",\n",
247
+ " \"phi3\": \"phi3\",\n",
248
+ " \"olmo\": \"olmo\"\n",
249
+ " }\n",
250
+ "\n",
251
+ "from langchain_openai import ChatOpenAI\n",
252
+ "llm = ChatOpenAI(model = models[\"gemma3\"], \n",
253
+ " api_key = api_key, \n",
254
+ " base_url = \"https://llm.nrp-nautilus.io\", \n",
255
+ " temperature=0)\n",
256
+ "\n",
257
+ "# Embedding model from NRP usually times out.\n",
258
+ "#embedding = OpenAIEmbeddings(model = \"embed-mistral\", api_key = api_key, base_url = \"https://llm.nrp-nautilus.io\")\n"
259
+ ]
260
+ },
261
+ {
262
+ "cell_type": "code",
263
+ "execution_count": null,
264
+ "id": "95d3e9a3-7334-44ba-a4bc-e7bfc4076358",
265
+ "metadata": {},
266
+ "outputs": [],
267
+ "source": [
268
+ "# Build a retrival agent\n",
269
+ "from langchain_core.vectorstores import InMemoryVectorStore\n",
270
+ "from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
271
+ "text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)\n",
272
+ "splits = text_splitter.split_documents(docs)"
273
+ ]
274
+ },
275
+ {
276
+ "cell_type": "code",
277
+ "execution_count": null,
278
+ "id": "fd8bcc13-d06d-43dd-9e06-4f29da803133",
279
+ "metadata": {
280
+ "scrolled": true
281
+ },
282
+ "outputs": [],
283
+ "source": [
284
+ "# slow part here, runs on remote GPU\n",
285
+ "from langchain_core.vectorstores import InMemoryVectorStore\n",
286
+ "vectorstore = InMemoryVectorStore.from_documents(documents=splits, embedding=embedding)\n",
287
+ "retriever = vectorstore.as_retriever()"
288
+ ]
289
+ },
290
+ {
291
+ "cell_type": "code",
292
+ "execution_count": null,
293
+ "id": "2bf50abf-5ccd-4de5-9fc4-c9043a66a108",
294
+ "metadata": {},
295
+ "outputs": [],
296
+ "source": [
297
+ "from langchain.chains import create_retrieval_chain\n",
298
+ "from langchain.chains.combine_documents import create_stuff_documents_chain\n",
299
+ "from langchain_core.prompts import ChatPromptTemplate\n",
300
+ "system_prompt = (\n",
301
+ " \"You are an assistant for question-answering tasks. \"\n",
302
+ " \"Use the following pieces of retrieved context to answer \"\n",
303
+ " \"the question. If you don't know the answer, say that you \"\n",
304
+ " \"don't know. Use three sentences maximum and keep the \"\n",
305
+ " \"answer concise.\"\n",
306
+ " \"\\n\\n\"\n",
307
+ " \"{context}\"\n",
308
+ ")\n",
309
+ "prompt = ChatPromptTemplate.from_messages(\n",
310
+ " [\n",
311
+ " (\"system\", system_prompt),\n",
312
+ " (\"human\", \"{input}\"),\n",
313
+ " ]\n",
314
+ ")\n",
315
+ "question_answer_chain = create_stuff_documents_chain(llm, prompt)\n",
316
+ "rag_chain = create_retrieval_chain(retriever, question_answer_chain)\n"
317
+ ]
318
+ },
319
+ {
320
+ "cell_type": "code",
321
+ "execution_count": null,
322
+ "id": "e15c64e7-0916-4042-8274-870e4fdb1af7",
323
+ "metadata": {},
324
+ "outputs": [],
325
+ "source": [
326
+ "prompt = \"I live in Tanzania and am having issues with lions breaking into my boma and preying on cattle. What interventions might work best for me?\"\n",
327
+ "results = rag_chain.invoke({\"input\": prompt})\n",
328
+ "results"
329
+ ]
330
+ },
331
+ {
332
+ "cell_type": "code",
333
+ "execution_count": null,
334
+ "id": "35613607-2c36-4761-a8ea-8c0889530f34",
335
+ "metadata": {},
336
+ "outputs": [],
337
+ "source": [
338
+ "prompt = \"What are the most cost-effective prevention methods for elephants raiding my crops?\"\n",
339
+ "\n",
340
+ "results = rag_chain.invoke({\"input\": prompt})\n",
341
+ "results"
342
+ ]
343
+ },
344
+ {
345
+ "cell_type": "code",
346
+ "execution_count": null,
347
+ "id": "3dfc39f6-86e9-47c3-ab67-08f90ebbb823",
348
+ "metadata": {},
349
+ "outputs": [],
350
+ "source": [
351
+ "rag_chain.invoke({\"input\": \n",
352
+ " \"I have a small herd of goats and cattle and I am worried about jaguars preying on them. What preventative measures can I take?\"\n",
353
+ " })"
354
+ ]
355
+ },
356
+ {
357
+ "cell_type": "code",
358
+ "execution_count": null,
359
+ "id": "56091874-0e41-4b35-be4f-08d8ec6faf56",
360
+ "metadata": {},
361
+ "outputs": [],
362
+ "source": [
363
+ "rag_chain.invoke({\"input\": \"I am trying to prevent coyotes from eating the calves of my free-range cattle. What may work best?\"})"
364
+ ]
365
+ },
366
+ {
367
+ "cell_type": "code",
368
+ "execution_count": null,
369
+ "id": "918dc691-6c66-46b2-8930-01dbeb6f712b",
370
+ "metadata": {},
371
+ "outputs": [],
372
+ "source": [
373
+ "rag_chain.invoke({\"input\": \"We have major issues with deer raiding our large agricultural fields. Is there anything I can try to prevent this that won’t break the bank?\"})"
374
+ ]
375
+ },
376
+ {
377
+ "cell_type": "code",
378
+ "execution_count": null,
379
+ "id": "07b9578c-9a89-4874-a34d-30a060ed3407",
380
+ "metadata": {},
381
+ "outputs": [],
382
+ "source": [
383
+ "rag_chain.invoke({\"input\": \"We live in a suburban area and bears sometimes come into our town to eat from our fruit trees and trash. What are the best ways for us to prevent this as a community? We don’t want to have to get rid of our fruit trees…\"})"
384
+ ]
385
+ },
386
+ {
387
+ "cell_type": "code",
388
+ "execution_count": null,
389
+ "id": "ba272b88-1622-4d06-9361-7f1e2ca89e73",
390
+ "metadata": {},
391
+ "outputs": [],
392
+ "source": [
393
+ "prompt = \"What cattle husbandry strategies might be helpful to prevent conflict if we live in wolf country?\"\n",
394
+ "\n",
395
+ "rag_chain.invoke({\"input\": prompt})"
396
+ ]
397
+ },
398
+ {
399
+ "cell_type": "code",
400
+ "execution_count": null,
401
+ "id": "9d4d1bf4-4084-430d-8b2d-39ce1d6815db",
402
+ "metadata": {},
403
+ "outputs": [],
404
+ "source": []
405
+ },
406
+ {
407
+ "cell_type": "code",
408
+ "execution_count": null,
409
+ "id": "d4bf2492-6852-43a7-8527-06ee4e9848c0",
410
+ "metadata": {},
411
+ "outputs": [],
412
+ "source": [
413
+ "## DRAFT exploring other embedding databases\n",
414
+ "\n",
415
+ "import os\n",
416
+ "from langchain_community.vectorstores import FAISS\n",
417
+ "from langchain_community.vectorstores import Chroma\n",
418
+ "from langchain_community.vectorstores import Qdrant\n",
419
+ "from qdrant_client import QdrantClient\n",
420
+ "from qdrant_client.models import Distance, VectorParams\n",
421
+ "import gc\n",
422
+ "import torch\n",
423
+ "\n",
424
+ "# Option 1: FAISS (Facebook AI Similarity Search) - Most memory efficient\n",
425
+ "def create_faiss_vectorstore(splits, embedding, persist_directory=\"./faiss_db\", batch_size=100):\n",
426
+ " \"\"\"\n",
427
+ " Create FAISS vector store with batched processing to minimize GPU RAM usage\n",
428
+ " \"\"\"\n",
429
+ " os.makedirs(persist_directory, exist_ok=True)\n",
430
+ " \n",
431
+ " # Process documents in batches to avoid GPU memory overflow\n",
432
+ " vectorstore = None\n",
433
+ " \n",
434
+ " for i in range(0, len(splits), batch_size):\n",
435
+ " batch = splits[i:i + batch_size]\n",
436
+ " print(f\"Processing batch {i//batch_size + 1}/{(len(splits) + batch_size - 1)//batch_size}\")\n",
437
+ " \n",
438
+ " if vectorstore is None:\n",
439
+ " # Create initial vectorstore with first batch\n",
440
+ " vectorstore = FAISS.from_documents(\n",
441
+ " documents=batch,\n",
442
+ " embedding=embedding\n",
443
+ " )\n",
444
+ " else:\n",
445
+ " # Add subsequent batches to existing vectorstore\n",
446
+ " batch_vectorstore = FAISS.from_documents(\n",
447
+ " documents=batch,\n",
448
+ " embedding=embedding\n",
449
+ " )\n",
450
+ " vectorstore.merge_from(batch_vectorstore)\n",
451
+ " \n",
452
+ " # Clean up temporary vectorstore\n",
453
+ " del batch_vectorstore\n",
454
+ " \n",
455
+ " # Force garbage collection and clear GPU cache if using CUDA\n",
456
+ " gc.collect()\n",
457
+ " if torch.cuda.is_available():\n",
458
+ " torch.cuda.empty_cache()\n",
459
+ " \n",
460
+ " # Save to disk\n",
461
+ " vectorstore.save_local(persist_directory)\n",
462
+ " print(f\"Vector store saved to {persist_directory}\")\n",
463
+ " \n",
464
+ " return vectorstore\n",
465
+ "\n",
466
+ "def load_faiss_vectorstore(embedding, persist_directory=\"./faiss_db\"):\n",
467
+ " \"\"\"Load existing FAISS vector store from disk\"\"\"\n",
468
+ " return FAISS.load_local(\n",
469
+ " persist_directory,\n",
470
+ " embedding,\n",
471
+ " allow_dangerous_deserialization=True # Only if you trust the source\n",
472
+ " )\n",
473
+ "\n",
474
+ "# Option 2: Chroma - Persistent SQLite-based storage\n",
475
+ "def create_chroma_vectorstore(splits, embedding, persist_directory=\"./chroma_db\", batch_size=100):\n",
476
+ " \"\"\"\n",
477
+ " Create Chroma vector store with batched processing\n",
478
+ " \"\"\"\n",
479
+ " # Initialize Chroma with persistence\n",
480
+ " vectorstore = Chroma(\n",
481
+ " persist_directory=persist_directory,\n",
482
+ " embedding_function=embedding\n",
483
+ " )\n",
484
+ " \n",
485
+ " # Add documents in batches\n",
486
+ " for i in range(0, len(splits), batch_size):\n",
487
+ " batch = splits[i:i + batch_size]\n",
488
+ " print(f\"Processing batch {i//batch_size + 1}/{(len(splits) + batch_size - 1)//batch_size}\")\n",
489
+ " \n",
490
+ " vectorstore.add_documents(batch)\n",
491
+ " \n",
492
+ " # Force garbage collection and clear GPU cache\n",
493
+ " gc.collect()\n",
494
+ " if torch.cuda.is_available():\n",
495
+ " torch.cuda.empty_cache()\n",
496
+ " \n",
497
+ " # Persist to disk\n",
498
+ " vectorstore.persist()\n",
499
+ " print(f\"Vector store persisted to {persist_directory}\")\n",
500
+ " \n",
501
+ " return vectorstore\n",
502
+ "\n",
503
+ "def load_chroma_vectorstore(embedding, persist_directory=\"./chroma_db\"):\n",
504
+ " \"\"\"Load existing Chroma vector store from disk\"\"\"\n",
505
+ " return Chroma(\n",
506
+ " persist_directory=persist_directory,\n",
507
+ " embedding_function=embedding\n",
508
+ " )\n",
509
+ "\n",
510
+ "# Option 3: Qdrant - High-performance vector database\n",
511
+ "def create_qdrant_vectorstore(splits, embedding, collection_name=\"documents\", \n",
512
+ " path=\"./qdrant_db\", batch_size=100):\n",
513
+ " \"\"\"\n",
514
+ " Create Qdrant vector store with local file-based storage\n",
515
+ " \"\"\"\n",
516
+ " # Initialize local Qdrant client\n",
517
+ " client = QdrantClient(path=path)\n",
518
+ " \n",
519
+ " # Get embedding dimension (embed a sample text)\n",
520
+ " sample_embedding = embedding.embed_query(\"sample text\")\n",
521
+ " embedding_dim = len(sample_embedding)\n",
522
+ " \n",
523
+ " # Create collection if it doesn't exist\n",
524
+ " try:\n",
525
+ " client.create_collection(\n",
526
+ " collection_name=collection_name,\n",
527
+ " vectors_config=VectorParams(size=embedding_dim, distance=Distance.COSINE)\n",
528
+ " )\n",
529
+ " except Exception as e:\n",
530
+ " print(f\"Collection might already exist: {e}\")\n",
531
+ " \n",
532
+ " # Create vectorstore\n",
533
+ " vectorstore = Qdrant(\n",
534
+ " client=client,\n",
535
+ " collection_name=collection_name,\n",
536
+ " embeddings=embedding\n",
537
+ " )\n",
538
+ " \n",
539
+ " # Add documents in batches\n",
540
+ " for i in range(0, len(splits), batch_size):\n",
541
+ " batch = splits[i:i + batch_size]\n",
542
+ " print(f\"Processing batch {i//batch_size + 1}/{(len(splits) + batch_size - 1)//batch_size}\")\n",
543
+ " \n",
544
+ " vectorstore.add_documents(batch)\n",
545
+ " \n",
546
+ " # Force garbage collection and clear GPU cache\n",
547
+ " gc.collect()\n",
548
+ " if torch.cuda.is_available():\n",
549
+ " torch.cuda.empty_cache()\n",
550
+ " \n",
551
+ " print(f\"Vector store created in {path}\")\n",
552
+ " return vectorstore\n",
553
+ "\n",
554
+ "def load_qdrant_vectorstore(embedding, collection_name=\"documents\", path=\"./qdrant_db\"):\n",
555
+ " \"\"\"Load existing Qdrant vector store from disk\"\"\"\n",
556
+ " client = QdrantClient(path=path)\n",
557
+ " return Qdrant(\n",
558
+ " client=client,\n",
559
+ " collection_name=collection_name,\n",
560
+ " embeddings=embedding\n",
561
+ " )\n"
562
+ ]
563
+ },
564
+ {
565
+ "cell_type": "code",
566
+ "execution_count": null,
567
+ "id": "3cf725ad-69a3-4abd-9907-52427babf6d5",
568
+ "metadata": {},
569
+ "outputs": [],
570
+ "source": [
571
+ "\n",
572
+ "# Usage examples:\n",
573
+ "\n",
574
+ "# Replace your original code with one of these options:\n",
575
+ "\n",
576
+ "# Option 1: FAISS (Recommended for most use cases)\n",
577
+ "vectorstore = create_faiss_vectorstore(\n",
578
+ " splits=splits, \n",
579
+ " embedding=embedding, \n",
580
+ " persist_directory=\"./my_faiss_db\",\n",
581
+ " batch_size=50 # Adjust based on your GPU memory\n",
582
+ ")\n",
583
+ "\n",
584
+ "# To load later:\n",
585
+ "# vectorstore = load_faiss_vectorstore(embedding, \"./my_faiss_db\")\n",
586
+ "\n",
587
+ "# Option 2: Chroma (Good for development and moderate scale)\n",
588
+ "# vectorstore = create_chroma_vectorstore(\n",
589
+ "# splits=splits,\n",
590
+ "# embedding=embedding,\n",
591
+ "# persist_directory=\"./my_chroma_db\",\n",
592
+ "# batch_size=50\n",
593
+ "# )\n",
594
+ "\n",
595
+ "# Option 3: Qdrant (Best for production and very large scale)\n",
596
+ "# vectorstore = create_qdrant_vectorstore(\n",
597
+ "# splits=splits,\n",
598
+ "# embedding=embedding,\n",
599
+ "# collection_name=\"my_documents\",\n",
600
+ "# path=\"./my_qdrant_db\",\n",
601
+ "# batch_size=50\n",
602
+ "# )\n",
603
+ "\n",
604
+ "# Memory optimization settings\n",
605
+ "def optimize_gpu_memory():\n",
606
+ " \"\"\"Additional GPU memory optimization\"\"\"\n",
607
+ " if torch.cuda.is_available():\n",
608
+ " # Set memory fraction if needed\n",
609
+ " torch.cuda.set_per_process_memory_fraction(0.8) # Use 80% of GPU memory\n",
610
+ " \n",
611
+ " # Enable memory mapping for large tensors\n",
612
+ " torch.backends.cuda.matmul.allow_tf32 = True\n",
613
+ " torch.backends.cudnn.allow_tf32 = True\n",
614
+ "\n",
615
+ "# Call before processing if you have GPU memory issues\n",
616
+ "# optimize_gpu_memory()"
617
+ ]
618
+ }
619
+ ],
620
+ "metadata": {
621
+ "kernelspec": {
622
+ "display_name": "Python 3 (ipykernel)",
623
+ "language": "python",
624
+ "name": "python3"
625
+ },
626
+ "language_info": {
627
+ "codemirror_mode": {
628
+ "name": "ipython",
629
+ "version": 3
630
+ },
631
+ "file_extension": ".py",
632
+ "mimetype": "text/x-python",
633
+ "name": "python",
634
+ "nbconvert_exporter": "python",
635
+ "pygments_lexer": "ipython3",
636
+ "version": "3.12.10"
637
+ }
638
+ },
639
+ "nbformat": 4,
640
+ "nbformat_minor": 5
641
+ }