{
"cells": [
{
"cell_type": "code",
"execution_count": 8,
"id": "405bc169-e0b7-48e6-84b8-4e4a791cf61a",
"metadata": {
"scrolled": true
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"INFO 06-09 04:41:03 [__init__.py:243] Automatically detected platform cuda.\n",
"INFO 06-09 04:41:06 [__init__.py:31] Available plugins for group vllm.general_plugins:\n",
"INFO 06-09 04:41:06 [__init__.py:33] - lora_filesystem_resolver -> vllm.plugins.lora_resolvers.filesystem_resolver:register_filesystem_resolver\n",
"INFO 06-09 04:41:06 [__init__.py:36] All plugins in this group will be loaded. Set `VLLM_PLUGINS` to control which plugins to load.\n",
"INFO 06-09 04:41:07 [api_server.py:1289] vLLM API server version 0.9.0.1\n",
"INFO 06-09 04:41:08 [cli_args.py:300] non-default args: {'host': '0.0.0.0', 'task': 'embed', 'trust_remote_code': True, 'enforce_eager': True, 'served_model_name': ['local'], 'tensor_parallel_size': 2, 'gpu_memory_utilization': 0.4}\n",
"WARNING 06-09 04:41:08 [config.py:3096] Your Quadro RTX 8000 device (with compute capability 7.5) doesn't support torch.bfloat16. Falling back to torch.float16 for compatibility.\n",
"WARNING 06-09 04:41:08 [config.py:3135] Casting torch.bfloat16 to torch.float16.\n",
"INFO 06-09 04:41:17 [config.py:473] Found sentence-transformers modules configuration.\n",
"INFO 06-09 04:41:17 [config.py:493] Found pooling configuration.\n",
"WARNING 06-09 04:41:17 [arg_utils.py:1583] Compute Capability < 8.0 is not supported by the V1 Engine. Falling back to V0. \n",
"WARNING 06-09 04:41:17 [arg_utils.py:1431] The model has a long context length (40960). This may causeOOM during the initial memory profiling phase, or result in low performance due to small KV cache size. Consider setting --max-model-len to a smaller value.\n",
"INFO 06-09 04:41:17 [config.py:1875] Defaulting to use mp for distributed inference\n",
"WARNING 06-09 04:41:17 [cuda.py:87] To see benefits of async output processing, enable CUDA graph. Since, enforce-eager is enabled, async output processor cannot be used\n",
"INFO 06-09 04:41:17 [api_server.py:257] Started engine process with PID 84927\n",
"INFO 06-09 04:41:21 [__init__.py:243] Automatically detected platform cuda.\n",
"INFO 06-09 04:41:24 [__init__.py:31] Available plugins for group vllm.general_plugins:\n",
"INFO 06-09 04:41:24 [__init__.py:33] - lora_filesystem_resolver -> vllm.plugins.lora_resolvers.filesystem_resolver:register_filesystem_resolver\n",
"INFO 06-09 04:41:24 [__init__.py:36] All plugins in this group will be loaded. Set `VLLM_PLUGINS` to control which plugins to load.\n",
"INFO 06-09 04:41:24 [llm_engine.py:230] Initializing a V0 LLM engine (v0.9.0.1) with config: model='Qwen/Qwen3-Embedding-4B', speculative_config=None, tokenizer='Qwen/Qwen3-Embedding-4B', skip_tokenizer_init=False, tokenizer_mode=auto, revision=None, override_neuron_config={}, tokenizer_revision=None, trust_remote_code=True, dtype=torch.float16, max_seq_len=40960, download_dir=None, load_format=LoadFormat.AUTO, tensor_parallel_size=2, pipeline_parallel_size=1, disable_custom_all_reduce=False, quantization=None, enforce_eager=True, kv_cache_dtype=auto, device_config=cuda, decoding_config=DecodingConfig(backend='auto', disable_fallback=False, disable_any_whitespace=False, disable_additional_properties=False, reasoning_backend=''), observability_config=ObservabilityConfig(show_hidden_metrics_for_version=None, otlp_traces_endpoint=None, collect_detailed_traces=None), seed=0, served_model_name=local, num_scheduler_steps=1, multi_step_stream_outputs=True, enable_prefix_caching=None, chunked_prefill_enabled=False, use_async_output_proc=False, pooler_config=PoolerConfig(pooling_type='LAST', normalize=True, softmax=None, step_tag_id=None, returned_token_ids=None), compilation_config={\"compile_sizes\": [], \"inductor_compile_config\": {\"enable_auto_functionalized_v2\": false}, \"cudagraph_capture_sizes\": [], \"max_capture_size\": 0}, use_cached_outputs=True, \n",
"WARNING 06-09 04:41:25 [multiproc_worker_utils.py:306] Reducing Torch parallelism from 64 threads to 1 to avoid unnecessary CPU contention. Set OMP_NUM_THREADS in the external environment to tune this value as needed.\n",
"INFO 06-09 04:41:25 [cuda.py:240] Cannot use FlashAttention-2 backend for Volta and Turing GPUs.\n",
"INFO 06-09 04:41:25 [cuda.py:289] Using XFormers backend.\n",
"INFO 06-09 04:41:29 [__init__.py:243] Automatically detected platform cuda.\n",
"\u001b[1;36m(VllmWorkerProcess pid=85107)\u001b[0;0m INFO 06-09 04:41:32 [multiproc_worker_utils.py:225] Worker ready; awaiting tasks\n",
"\u001b[1;36m(VllmWorkerProcess pid=85107)\u001b[0;0m INFO 06-09 04:41:32 [__init__.py:31] Available plugins for group vllm.general_plugins:\n",
"\u001b[1;36m(VllmWorkerProcess pid=85107)\u001b[0;0m INFO 06-09 04:41:32 [__init__.py:33] - lora_filesystem_resolver -> vllm.plugins.lora_resolvers.filesystem_resolver:register_filesystem_resolver\n",
"\u001b[1;36m(VllmWorkerProcess pid=85107)\u001b[0;0m INFO 06-09 04:41:32 [__init__.py:36] All plugins in this group will be loaded. Set `VLLM_PLUGINS` to control which plugins to load.\n",
"\u001b[1;36m(VllmWorkerProcess pid=85107)\u001b[0;0m INFO 06-09 04:41:32 [cuda.py:240] Cannot use FlashAttention-2 backend for Volta and Turing GPUs.\n",
"\u001b[1;36m(VllmWorkerProcess pid=85107)\u001b[0;0m INFO 06-09 04:41:32 [cuda.py:289] Using XFormers backend.\n",
"INFO 06-09 04:41:33 [utils.py:1077] Found nccl from library libnccl.so.2\n",
"INFO 06-09 04:41:33 [pynccl.py:69] vLLM is using nccl==2.26.2\n",
"\u001b[1;36m(VllmWorkerProcess pid=85107)\u001b[0;0m INFO 06-09 04:41:33 [utils.py:1077] Found nccl from library libnccl.so.2\n",
"\u001b[1;36m(VllmWorkerProcess pid=85107)\u001b[0;0m INFO 06-09 04:41:33 [pynccl.py:69] vLLM is using nccl==2.26.2\n",
"\u001b[1;36m(VllmWorkerProcess pid=85107)\u001b[0;0m INFO 06-09 04:41:34 [custom_all_reduce_utils.py:245] reading GPU P2P access cache from /home/jovyan/.cache/vllm/gpu_p2p_access_cache_for_0,1.json\n",
"\u001b[1;36m(VllmWorkerProcess pid=85107)\u001b[0;0m WARNING 06-09 04:41:34 [custom_all_reduce.py:146] Custom allreduce is disabled because your platform lacks GPU P2P capability or P2P test failed. To silence this warning, specify disable_custom_all_reduce=True explicitly.\n",
"INFO 06-09 04:41:34 [custom_all_reduce_utils.py:245] reading GPU P2P access cache from /home/jovyan/.cache/vllm/gpu_p2p_access_cache_for_0,1.json\n",
"WARNING 06-09 04:41:34 [custom_all_reduce.py:146] Custom allreduce is disabled because your platform lacks GPU P2P capability or P2P test failed. To silence this warning, specify disable_custom_all_reduce=True explicitly.\n",
"INFO 06-09 04:41:34 [shm_broadcast.py:250] vLLM message queue communication handle: Handle(local_reader_ranks=[1], buffer_handle=(1, 4194304, 6, 'psm_523dee68'), local_subscribe_addr='ipc:///tmp/4d2d0127-8b88-42ce-ba52-5c7e4aac03b6', remote_subscribe_addr=None, remote_addr_ipv6=False)\n",
"INFO 06-09 04:41:34 [parallel_state.py:1064] rank 0 in world size 2 is assigned as DP rank 0, PP rank 0, TP rank 0, EP rank 0\n",
"\u001b[1;36m(VllmWorkerProcess pid=85107)\u001b[0;0m INFO 06-09 04:41:34 [parallel_state.py:1064] rank 1 in world size 2 is assigned as DP rank 0, PP rank 0, TP rank 1, EP rank 1\n",
"INFO 06-09 04:41:34 [model_runner.py:1170] Starting to load model Qwen/Qwen3-Embedding-4B...\n",
"\u001b[1;36m(VllmWorkerProcess pid=85107)\u001b[0;0m INFO 06-09 04:41:34 [model_runner.py:1170] Starting to load model Qwen/Qwen3-Embedding-4B...\n",
"INFO 06-09 04:41:34 [weight_utils.py:291] Using model weights format ['*.safetensors']\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Loading safetensors checkpoint shards: 0% Completed | 0/2 [00:00<?, ?it/s]\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[1;36m(VllmWorkerProcess pid=85107)\u001b[0;0m INFO 06-09 04:41:35 [weight_utils.py:291] Using model weights format ['*.safetensors']\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Loading safetensors checkpoint shards: 50% Completed | 1/2 [00:04<00:04, 4.64s/it]\n",
"Loading safetensors checkpoint shards: 100% Completed | 2/2 [00:11<00:00, 6.00s/it]\n",
"Loading safetensors checkpoint shards: 100% Completed | 2/2 [00:11<00:00, 5.79s/it]\n",
"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[1;36m(VllmWorkerProcess pid=85107)\u001b[0;0m INFO 06-09 04:41:46 [default_loader.py:280] Loading weights took 11.40 seconds\n",
"INFO 06-09 04:41:46 [default_loader.py:280] Loading weights took 11.81 seconds\n",
"\u001b[1;36m(VllmWorkerProcess pid=85107)\u001b[0;0m INFO 06-09 04:41:47 [model_runner.py:1202] Model loading took 3.8162 GiB and 12.679641 seconds\n",
"INFO 06-09 04:41:47 [model_runner.py:1202] Model loading took 3.8162 GiB and 12.722510 seconds\n",
"INFO 06-09 04:41:47 [api_server.py:1336] Starting vLLM API server on http://0.0.0.0:8000\n",
"INFO 06-09 04:41:47 [launcher.py:28] Available routes are:\n",
"INFO 06-09 04:41:47 [launcher.py:36] Route: /openapi.json, Methods: HEAD, GET\n",
"INFO 06-09 04:41:47 [launcher.py:36] Route: /docs, Methods: HEAD, GET\n",
"INFO 06-09 04:41:47 [launcher.py:36] Route: /docs/oauth2-redirect, Methods: HEAD, GET\n",
"INFO 06-09 04:41:47 [launcher.py:36] Route: /redoc, Methods: HEAD, GET\n",
"INFO 06-09 04:41:47 [launcher.py:36] Route: /health, Methods: GET\n",
"INFO 06-09 04:41:47 [launcher.py:36] Route: /load, Methods: GET\n",
"INFO 06-09 04:41:47 [launcher.py:36] Route: /ping, Methods: POST\n",
"INFO 06-09 04:41:47 [launcher.py:36] Route: /ping, Methods: GET\n",
"INFO 06-09 04:41:47 [launcher.py:36] Route: /tokenize, Methods: POST\n",
"INFO 06-09 04:41:47 [launcher.py:36] Route: /detokenize, Methods: POST\n",
"INFO 06-09 04:41:47 [launcher.py:36] Route: /v1/models, Methods: GET\n",
"INFO 06-09 04:41:47 [launcher.py:36] Route: /version, Methods: GET\n",
"INFO 06-09 04:41:47 [launcher.py:36] Route: /v1/chat/completions, Methods: POST\n",
"INFO 06-09 04:41:47 [launcher.py:36] Route: /v1/completions, Methods: POST\n",
"INFO 06-09 04:41:47 [launcher.py:36] Route: /v1/embeddings, Methods: POST\n",
"INFO 06-09 04:41:47 [launcher.py:36] Route: /pooling, Methods: POST\n",
"INFO 06-09 04:41:47 [launcher.py:36] Route: /classify, Methods: POST\n",
"INFO 06-09 04:41:47 [launcher.py:36] Route: /score, Methods: POST\n",
"INFO 06-09 04:41:47 [launcher.py:36] Route: /v1/score, Methods: POST\n",
"INFO 06-09 04:41:47 [launcher.py:36] Route: /v1/audio/transcriptions, Methods: POST\n",
"INFO 06-09 04:41:47 [launcher.py:36] Route: /rerank, Methods: POST\n",
"INFO 06-09 04:41:47 [launcher.py:36] Route: /v1/rerank, Methods: POST\n",
"INFO 06-09 04:41:47 [launcher.py:36] Route: /v2/rerank, Methods: POST\n",
"INFO 06-09 04:41:47 [launcher.py:36] Route: /invocations, Methods: POST\n",
"INFO 06-09 04:41:47 [launcher.py:36] Route: /metrics, Methods: GET\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"INFO: Started server process [84514]\n",
"INFO: Waiting for application startup.\n",
"INFO: Application startup complete.\n"
]
}
],
"source": [
"import os\n",
"import subprocess\n",
"import threading\n",
"import time\n",
"\n",
"# Set environment variable we need to support dual-GPU on Cirrus\n",
"os.environ[\"NCCL_P2P_LEVEL\"] = \"NVL\"\n",
"os.environ[\"VLLM_API_KEY\"] = os.getenv(\"OPENAI_API_KEY\") # set same key for simplicity\n",
"\n",
"# https://huggingface.co/spaces/mteb/leaderboard \n",
"def run_vllm_server():\n",
" subprocess.run([\n",
" \"vllm\", \"serve\", \"Qwen/Qwen3-Embedding-4B\",\n",
" \"--host\", \"0.0.0.0\",\n",
" \"--port\", \"8000\",\n",
" \"--tensor-parallel-size\", \"2\",\n",
" \"--trust-remote-code\",\n",
" \"--gpu-memory-utilization\", \"0.4\",\n",
" \"--enforce-eager\",\n",
" \"--served-model-name\", \"local\",\n",
" \"--task\", \"embed\" # Run in embed mode! (default is 'generate')\n",
" ])\n",
"\n",
"# Start server in daemon thread\n",
"server_thread = threading.Thread(target=run_vllm_server, daemon=True)\n",
"server_thread.start()\n",
"\n",
"## give server time to start up:\n",
"import time\n",
"# Pause execution for 100 seconds (may take longer but usually 1 min is enough)\n",
"time.sleep(100)"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "9a8397fa-6896-40a5-97d9-1d0c98797b35",
"metadata": {},
"outputs": [],
"source": [
"## wait for output above to print routes, ending with: \n",
"## INFO: Application startup complete.\n"
]
},
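{
"cell_type": "code",
"execution_count": null,
"id": "health-check-poll",
"metadata": {},
"outputs": [],
"source": [
"## Optional sketch (not part of the original workflow): instead of a fixed sleep,\n",
"## poll the /health route listed in the startup log above until the server is ready.\n",
"## The URL, timeout, and helper name below are assumptions based on the serve command used earlier.\n",
"import time\n",
"import requests\n",
"\n",
"def wait_for_vllm(url=\"http://localhost:8000/health\", timeout=300, interval=5):\n",
"    \"\"\"Return once the vLLM server answers /health, or raise after `timeout` seconds.\"\"\"\n",
"    deadline = time.time() + timeout\n",
"    while time.time() < deadline:\n",
"        try:\n",
"            if requests.get(url, timeout=2).status_code == 200:\n",
"                print(\"vLLM server is ready.\")\n",
"                return\n",
"        except requests.exceptions.RequestException:\n",
"            pass  # server not accepting connections yet\n",
"        time.sleep(interval)\n",
"    raise TimeoutError(f\"vLLM server did not become ready within {timeout} seconds\")\n",
"\n",
"# wait_for_vllm()"
]
},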
{
"cell_type": "code",
"execution_count": 10,
"id": "24b64902-1305-43e7-9da8-e4d82d097cb5",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"INFO 06-09 04:42:36 [logger.py:42] Received request embd-db52ec2756c34681b66267b7d54e1c82-0: prompt: ' product down', params: PoolingParams(dimensions=None, additional_metadata=None), prompt_token_ids: [1985, 1495], prompt_embeds shape: None, lora_request: None, prompt_adapter_request: None.\n",
"INFO 06-09 04:42:36 [engine.py:316] Added request embd-db52ec2756c34681b66267b7d54e1c82-0.\n",
"INFO 06-09 04:42:38 [metrics.py:486] Avg prompt throughput: 0.2 tokens/s, Avg generation throughput: 0.1 tokens/s, Running: 0 reqs, Swapped: 0 reqs, Pending: 0 reqs, GPU KV cache usage: 0.0%, CPU KV cache usage: 0.0%.\n",
"INFO: 127.0.0.1:43468 - \"POST /v1/embeddings HTTP/1.1\" 200 OK\n"
]
}
],
"source": [
"## NOTE! You must wait until the log above finishes and not just the cell.\n",
"## Connect to the local model\n",
"from langchain_openai import OpenAIEmbeddings\n",
"embedding = OpenAIEmbeddings(\n",
" model = \"local\", ## served model name\n",
" api_key = os.getenv(\"OPENAI_API_KEY\"),\n",
" base_url = \"http://localhost:8000/v1\",\n",
")\n",
"\n",
"## test that the model can do embeddings\n",
"from langchain_core.vectorstores import InMemoryVectorStore\n",
"vectorstore = InMemoryVectorStore.from_texts([\"test text\"], embedding=embedding)"
]
},
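{
"cell_type": "code",
"execution_count": null,
"id": "embedding-dim-check",
"metadata": {},
"outputs": [],
"source": [
"## Optional check (sketch, not in the original notebook): confirm the embedding\n",
"## dimension served by the local model. Qwen3-Embedding-4B returns 2560-dimensional\n",
"## vectors, which must match the size used when the Qdrant collection is created below.\n",
"vec = embedding.embed_query(\"dimension check\")\n",
"print(len(vec))  # expected: 2560"
]
},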
{
"cell_type": "code",
"execution_count": 11,
"id": "95ed10f3-5339-40cd-bf16-b0854f8b4b91",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"INFO 06-09 04:42:48 [metrics.py:486] Avg prompt throughput: 0.0 tokens/s, Avg generation throughput: 0.0 tokens/s, Running: 0 reqs, Swapped: 0 reqs, Pending: 0 reqs, GPU KV cache usage: 0.0%, CPU KV cache usage: 0.0%.\n"
]
}
],
"source": [
"import os\n",
"import requests\n",
"import zipfile\n",
"import pathlib\n",
"from langchain_community.document_loaders import PyPDFLoader\n",
"\n",
"def download_and_unzip(url, output_dir):\n",
" response = requests.get(url)\n",
" zip_file_path = os.path.basename(url)\n",
" with open(zip_file_path, 'wb') as f:\n",
" f.write(response.content)\n",
" with zipfile.ZipFile(zip_file_path, 'r') as zip_ref:\n",
" zip_ref.extractall(output_dir)\n",
" os.remove(zip_file_path)\n",
"\n",
"def pdf_loader(path):\n",
" all_documents = []\n",
" docs_dir = pathlib.Path(path)\n",
" for file in docs_dir.iterdir():\n",
" loader = PyPDFLoader(file)\n",
" documents = loader.load()\n",
" all_documents.extend(documents)\n",
" return all_documents\n",
"\n",
"download_and_unzip(\"https://minio.carlboettiger.info/public-data/hwc.zip\", 'hwc')\n",
"docs = pdf_loader('hwc/')"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "631a65a5",
"metadata": {},
"outputs": [],
"source": [
"# Split the parsed pdf documents\n",
"\n",
"from langchain_text_splitters import RecursiveCharacterTextSplitter\n",
"text_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=200)\n",
"splits = text_splitter.split_documents(docs)\n"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "b7b8973b",
"metadata": {},
"outputs": [],
"source": [
"\n",
"# create an id for each split\n",
"from uuid import uuid4\n",
"uuids = [str(uuid4()) for _ in range(len(splits))]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "24facce1",
"metadata": {},
"outputs": [],
"source": [
"from langchain_qdrant import QdrantVectorStore\n",
"from qdrant_client import QdrantClient\n",
"from qdrant_client.http.models import Distance, VectorParams\n",
"\n",
"client = QdrantClient(path = \"hwc_qdrant.db\")\n",
"\n",
"# create a new store\n",
"client.create_collection(\n",
" collection_name=\"demo_collection\",\n",
" vectors_config=VectorParams(size=2560, distance=Distance.COSINE),\n",
")\n",
"\n",
"# can connect to an existing store\n",
"vector_store = QdrantVectorStore(\n",
" client=client,\n",
" collection_name=\"demo_collection\",\n",
" embedding=embedding\n",
")"
]
},
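{
"cell_type": "code",
"execution_count": null,
"id": "idempotent-collection",
"metadata": {},
"outputs": [],
"source": [
"## Optional re-run-safe variant (sketch): `create_collection` above raises if the\n",
"## collection already exists. Recent qdrant-client releases expose `collection_exists`;\n",
"## if your installed version does not, wrap create_collection in try/except instead.\n",
"if not client.collection_exists(\"demo_collection\"):\n",
"    client.create_collection(\n",
"        collection_name=\"demo_collection\",\n",
"        vectors_config=VectorParams(size=2560, distance=Distance.COSINE),\n",
"    )"
]
},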
{
"cell_type": "code",
"execution_count": null,
"id": "38f5e60e",
"metadata": {},
"outputs": [],
"source": [
"\n",
"# slow part here, runs on remote GPU\n",
"vector_store.add_documents(documents=splits, ids=uuids)"
]
},
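{
"cell_type": "code",
"execution_count": null,
"id": "qdrant-count-check",
"metadata": {},
"outputs": [],
"source": [
"## Optional sanity check (sketch): the number of points stored in Qdrant should\n",
"## equal the number of document chunks produced by the splitter.\n",
"print(len(splits), \"chunks split\")\n",
"print(client.count(collection_name=\"demo_collection\").count, \"points stored in Qdrant\")"
]
},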
{
"cell_type": "code",
"execution_count": null,
"id": "fd8bcc13-d06d-43dd-9e06-4f29da803133",
"metadata": {
"scrolled": true
},
"outputs": [],
"source": [
"# slow part here, runs on remote GPU\n",
"# vectorstore = vector_store.from_documents(documents=splits, embedding = embedding)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "c6e99791-8f34-4722-9708-665e409c26bd",
"metadata": {},
"outputs": [],
"source": [
"# Set up the Chat model from one of the NRP models\n",
"import os\n",
"api_key = os.getenv(\"OPENAI_API_KEY\")\n",
"\n",
"# see `curl -H \"Authorization: Bearer $OPENAI_API_KEY\" https://llm.nrp-nautilus.io/v1/models`\n",
"models = {\"llama3\": \"llama3-sdsc\", \n",
" \"deepseek-small\": \"DeepSeek-R1-Distill-Qwen-32B\",\n",
" \"deepseek\": \"deepseek-r1-qwen-qualcomm\",\n",
" \"gemma3\": \"gemma3\",\n",
" \"phi3\": \"phi3\",\n",
" \"olmo\": \"olmo\"\n",
" }\n",
"\n",
"from langchain_openai import ChatOpenAI\n",
"llm = ChatOpenAI(model = models[\"gemma3\"], \n",
" api_key = api_key, \n",
" base_url = \"https://llm.nrp-nautilus.io\", \n",
" temperature=0)\n",
"\n",
"# Embedding model from NRP usually times out.\n",
"#embedding = OpenAIEmbeddings(model = \"embed-mistral\", api_key = api_key, base_url = \"https://llm.nrp-nautilus.io\")\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "2bf50abf-5ccd-4de5-9fc4-c9043a66a108",
"metadata": {},
"outputs": [],
"source": [
"from langchain.chains import create_retrieval_chain\n",
"from langchain.chains.combine_documents import create_stuff_documents_chain\n",
"from langchain_core.prompts import ChatPromptTemplate\n",
"system_prompt = (\n",
" \"You are an assistant for question-answering tasks. \"\n",
" \"Use the following scientific articles as the retrieved context to answer \"\n",
" \"the question. Appropriately cite the articles from the context on which your answer is based. \"\n",
" \"Do not attempt to cite articles that are not in the context.\"\n",
" \"If you don't know the answer, say that you don't know.\"\n",
" \"Use up to five sentences maximum and keep the \"\n",
" \"answer concise.\"\n",
" \"\\n\\n\"\n",
" \"{context}\"\n",
")\n",
"prompt = ChatPromptTemplate.from_messages(\n",
" [\n",
" (\"system\", system_prompt),\n",
" (\"human\", \"{input}\"),\n",
" ]\n",
")\n",
"\n",
"\n",
"question_answer_chain = create_stuff_documents_chain(llm, prompt)\n",
"\n",
"retriever = vectorstore.as_retriever()\n",
"rag_chain = create_retrieval_chain(retriever, question_answer_chain)\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "e15c64e7-0916-4042-8274-870e4fdb1af7",
"metadata": {},
"outputs": [],
"source": [
"prompt = \"I live in Tanzania and am having issues with lions breaking into my boma and preying on cattle. What interventions might work best for me?\"\n",
"results = rag_chain.invoke({\"input\": prompt})\n",
"results"
]
},
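{
"cell_type": "code",
"execution_count": null,
"id": "inspect-rag-answer",
"metadata": {},
"outputs": [],
"source": [
"## `create_retrieval_chain` returns a dict with \"input\", \"context\" (the retrieved\n",
"## Documents) and \"answer\". A small sketch for inspecting just the answer and the\n",
"## source PDFs it was grounded on (metadata keys come from PyPDFLoader):\n",
"print(results[\"answer\"])\n",
"for doc in results[\"context\"]:\n",
"    print(\"-\", doc.metadata.get(\"source\"), \"page\", doc.metadata.get(\"page\"))"
]
},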
{
"cell_type": "code",
"execution_count": null,
"id": "35613607-2c36-4761-a8ea-8c0889530f34",
"metadata": {},
"outputs": [],
"source": [
"prompt = \"What are the most cost-effective prevention methods for elephants raiding my crops?\"\n",
"\n",
"results = rag_chain.invoke({\"input\": prompt})\n",
"results"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3dfc39f6-86e9-47c3-ab67-08f90ebbb823",
"metadata": {},
"outputs": [],
"source": [
"rag_chain.invoke({\"input\": \n",
" \"I have a small herd of goats and cattle and I am worried about jaguars preying on them. What preventative measures can I take?\"\n",
" })"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "56091874-0e41-4b35-be4f-08d8ec6faf56",
"metadata": {},
"outputs": [],
"source": [
"rag_chain.invoke({\"input\": \"I am trying to prevent coyotes from eating the calves of my free-range cattle. What may work best?\"})"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "918dc691-6c66-46b2-8930-01dbeb6f712b",
"metadata": {},
"outputs": [],
"source": [
"rag_chain.invoke({\"input\": \"We have major issues with deer raiding our large agricultural fields. Is there anything I can try to prevent this that won’t break the bank?\"})"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "07b9578c-9a89-4874-a34d-30a060ed3407",
"metadata": {},
"outputs": [],
"source": [
"rag_chain.invoke({\"input\": \"We live in a suburban area and bears sometimes come into our town to eat from our fruit trees and trash. What are the best ways for us to prevent this as a community? We don’t want to have to get rid of our fruit trees…\"})"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "ba272b88-1622-4d06-9361-7f1e2ca89e73",
"metadata": {},
"outputs": [],
"source": [
"prompt = \"What cattle husbandry strategies might be helpful to prevent conflict if we live in wolf country?\"\n",
"\n",
"rag_chain.invoke({\"input\": prompt})"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "9d4d1bf4-4084-430d-8b2d-39ce1d6815db",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "d4bf2492-6852-43a7-8527-06ee4e9848c0",
"metadata": {},
"outputs": [],
"source": [
"## DRAFT exploring other embedding databases\n",
"\n",
"import os\n",
"from langchain_community.vectorstores import FAISS\n",
"from langchain_community.vectorstores import Chroma\n",
"from langchain_community.vectorstores import Qdrant\n",
"from qdrant_client import QdrantClient\n",
"from qdrant_client.models import Distance, VectorParams\n",
"import gc\n",
"import torch\n",
"\n",
"# Option 1: FAISS (Facebook AI Similarity Search) - Most memory efficient\n",
"def create_faiss_vectorstore(splits, embedding, persist_directory=\"./faiss_db\", batch_size=100):\n",
" \"\"\"\n",
" Create FAISS vector store with batched processing to minimize GPU RAM usage\n",
" \"\"\"\n",
" os.makedirs(persist_directory, exist_ok=True)\n",
" \n",
" # Process documents in batches to avoid GPU memory overflow\n",
" vectorstore = None\n",
" \n",
" for i in range(0, len(splits), batch_size):\n",
" batch = splits[i:i + batch_size]\n",
" print(f\"Processing batch {i//batch_size + 1}/{(len(splits) + batch_size - 1)//batch_size}\")\n",
" \n",
" if vectorstore is None:\n",
" # Create initial vectorstore with first batch\n",
" vectorstore = FAISS.from_documents(\n",
" documents=batch,\n",
" embedding=embedding\n",
" )\n",
" else:\n",
" # Add subsequent batches to existing vectorstore\n",
" batch_vectorstore = FAISS.from_documents(\n",
" documents=batch,\n",
" embedding=embedding\n",
" )\n",
" vectorstore.merge_from(batch_vectorstore)\n",
" \n",
" # Clean up temporary vectorstore\n",
" del batch_vectorstore\n",
" \n",
" # Force garbage collection and clear GPU cache if using CUDA\n",
" gc.collect()\n",
" if torch.cuda.is_available():\n",
" torch.cuda.empty_cache()\n",
" \n",
" # Save to disk\n",
" vectorstore.save_local(persist_directory)\n",
" print(f\"Vector store saved to {persist_directory}\")\n",
" \n",
" return vectorstore\n",
"\n",
"def load_faiss_vectorstore(embedding, persist_directory=\"./faiss_db\"):\n",
" \"\"\"Load existing FAISS vector store from disk\"\"\"\n",
" return FAISS.load_local(\n",
" persist_directory,\n",
" embedding,\n",
" allow_dangerous_deserialization=True # Only if you trust the source\n",
" )\n",
"\n",
"# Option 2: Chroma - Persistent SQLite-based storage\n",
"def create_chroma_vectorstore(splits, embedding, persist_directory=\"./chroma_db\", batch_size=100):\n",
" \"\"\"\n",
" Create Chroma vector store with batched processing\n",
" \"\"\"\n",
" # Initialize Chroma with persistence\n",
" vectorstore = Chroma(\n",
" persist_directory=persist_directory,\n",
" embedding_function=embedding\n",
" )\n",
" \n",
" # Add documents in batches\n",
" for i in range(0, len(splits), batch_size):\n",
" batch = splits[i:i + batch_size]\n",
" print(f\"Processing batch {i//batch_size + 1}/{(len(splits) + batch_size - 1)//batch_size}\")\n",
" \n",
" vectorstore.add_documents(batch)\n",
" \n",
" # Force garbage collection and clear GPU cache\n",
" gc.collect()\n",
" if torch.cuda.is_available():\n",
" torch.cuda.empty_cache()\n",
" \n",
" # Persist to disk\n",
" vectorstore.persist()\n",
" print(f\"Vector store persisted to {persist_directory}\")\n",
" \n",
" return vectorstore\n",
"\n",
"def load_chroma_vectorstore(embedding, persist_directory=\"./chroma_db\"):\n",
" \"\"\"Load existing Chroma vector store from disk\"\"\"\n",
" return Chroma(\n",
" persist_directory=persist_directory,\n",
" embedding_function=embedding\n",
" )\n",
"\n",
"# Option 3: Qdrant - High-performance vector database\n",
"def create_qdrant_vectorstore(splits, embedding, collection_name=\"documents\", \n",
" path=\"./qdrant_db\", batch_size=100):\n",
" \"\"\"\n",
" Create Qdrant vector store with local file-based storage\n",
" \"\"\"\n",
" # Initialize local Qdrant client\n",
" client = QdrantClient(path=path)\n",
" \n",
" # Get embedding dimension (embed a sample text)\n",
" sample_embedding = embedding.embed_query(\"sample text\")\n",
" embedding_dim = len(sample_embedding)\n",
" \n",
" # Create collection if it doesn't exist\n",
" try:\n",
" client.create_collection(\n",
" collection_name=collection_name,\n",
" vectors_config=VectorParams(size=embedding_dim, distance=Distance.COSINE)\n",
" )\n",
" except Exception as e:\n",
" print(f\"Collection might already exist: {e}\")\n",
" \n",
" # Create vectorstore\n",
" vectorstore = Qdrant(\n",
" client=client,\n",
" collection_name=collection_name,\n",
" embeddings=embedding\n",
" )\n",
" \n",
" # Add documents in batches\n",
" for i in range(0, len(splits), batch_size):\n",
" batch = splits[i:i + batch_size]\n",
" print(f\"Processing batch {i//batch_size + 1}/{(len(splits) + batch_size - 1)//batch_size}\")\n",
" \n",
" vectorstore.add_documents(batch)\n",
" \n",
" # Force garbage collection and clear GPU cache\n",
" gc.collect()\n",
" if torch.cuda.is_available():\n",
" torch.cuda.empty_cache()\n",
" \n",
" print(f\"Vector store created in {path}\")\n",
" return vectorstore\n",
"\n",
"def load_qdrant_vectorstore(embedding, collection_name=\"documents\", path=\"./qdrant_db\"):\n",
" \"\"\"Load existing Qdrant vector store from disk\"\"\"\n",
" client = QdrantClient(path=path)\n",
" return Qdrant(\n",
" client=client,\n",
" collection_name=collection_name,\n",
" embeddings=embedding\n",
" )\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "3cf725ad-69a3-4abd-9907-52427babf6d5",
"metadata": {},
"outputs": [],
"source": [
"\n",
"# Usage examples:\n",
"\n",
"# Replace your original code with one of these options:\n",
"\n",
"# Option 1: FAISS (Recommended for most use cases)\n",
"vectorstore = create_faiss_vectorstore(\n",
" splits=splits, \n",
" embedding=embedding, \n",
" persist_directory=\"./my_faiss_db\",\n",
" batch_size=50 # Adjust based on your GPU memory\n",
")\n",
"\n",
"# To load later:\n",
"# vectorstore = load_faiss_vectorstore(embedding, \"./my_faiss_db\")\n",
"\n",
"# Option 2: Chroma (Good for development and moderate scale)\n",
"# vectorstore = create_chroma_vectorstore(\n",
"# splits=splits,\n",
"# embedding=embedding,\n",
"# persist_directory=\"./my_chroma_db\",\n",
"# batch_size=50\n",
"# )\n",
"\n",
"# Option 3: Qdrant (Best for production and very large scale)\n",
"# vectorstore = create_qdrant_vectorstore(\n",
"# splits=splits,\n",
"# embedding=embedding,\n",
"# collection_name=\"my_documents\",\n",
"# path=\"./my_qdrant_db\",\n",
"# batch_size=50\n",
"# )\n",
"\n",
"# Memory optimization settings\n",
"def optimize_gpu_memory():\n",
" \"\"\"Additional GPU memory optimization\"\"\"\n",
" if torch.cuda.is_available():\n",
" # Set memory fraction if needed\n",
" torch.cuda.set_per_process_memory_fraction(0.8) # Use 80% of GPU memory\n",
" \n",
" # Enable memory mapping for large tensors\n",
" torch.backends.cuda.matmul.allow_tf32 = True\n",
" torch.backends.cudnn.allow_tf32 = True\n",
"\n",
"# Call before processing if you have GPU memory issues\n",
"# optimize_gpu_memory()"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "base",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.10"
}
},
"nbformat": 4,
"nbformat_minor": 5
}