no problems, local llm setup
src/app.py (CHANGED, +4 -5)
@@ -1,8 +1,8 @@
 import os
 import gradio as gr
 from llama_index.core import Settings, ServiceContext
-from
-from llama_index.llms import HuggingFaceLLM
+from llama_index.embeddings.huggingface import HuggingFaceEmbedding
+from llama_index.llms.huggingface import HuggingFaceLLM
 from ctransformers import AutoModelForCausalLM
 from parse_tabular import create_symptom_index
 import json
@@ -23,8 +23,7 @@ model = AutoModelForCausalLM.from_pretrained(
 llm = HuggingFaceLLM(
     model=model,
     context_window=2048,
-    max_new_tokens=256
-    temperature=0.7
+    max_new_tokens=256
 )
 
 # Create service context with local LLM
@@ -34,7 +33,7 @@ service_context = ServiceContext.from_defaults(
 )
 
 # Create the index at startup with local service context
-symptom_index = create_symptom_index(
+symptom_index = create_symptom_index()
 
 # --- System prompt ---
 SYSTEM_PROMPT = """
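
A note on the import hunk: llama-index 0.10 split its integrations into separate pip packages, so HuggingFaceLLM moved from llama_index.llms to llama_index.llms.huggingface. Assuming this Space runs a post-0.10 llama-index (the diff does not show requirements.txt), the working imports and the packages that provide them look like this:

# llama-index >= 0.10 ships integrations as separate packages, so the old
# `from llama_index.llms import HuggingFaceLLM` no longer resolves.
# pip install llama-index-llms-huggingface llama-index-embeddings-huggingface
from llama_index.llms.huggingface import HuggingFaceLLM
from llama_index.embeddings.huggingface import HuggingFaceEmbedding

The other two hunks are syntax repairs visible in the diff itself: the old argument list had max_new_tokens=256 with no comma before temperature=0.7, and symptom_index = create_symptom_index( was never closed; the new code keeps a single max_new_tokens=256 argument and calls create_symptom_index() with no arguments.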