Better visibility into the startup process
src/app.py  CHANGED  +6 -0
@@ -109,21 +109,27 @@ def ensure_model():
 model_path = ensure_model()
 
 # Configure local LLM with LlamaCPP
+print("\nInitializing LLM...")
 llm = LlamaCPP(
     model_path=model_path,
     temperature=0.7,
     max_new_tokens=256,
     context_window=2048
 )
+print("LLM initialized successfully")
 
 # Configure global settings
+print("\nConfiguring settings...")
 Settings.llm = llm
 Settings.embed_model = HuggingFaceEmbedding(
     model_name="sentence-transformers/all-MiniLM-L6-v2"
 )
+print("Settings configured")
 
 # Create the index at startup
+print("\nCreating symptom index...")
 symptom_index = create_symptom_index()
+print("Index created successfully")
 
 # --- System prompt ---
 SYSTEM_PROMPT = """
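For context, the startup section of src/app.py reads roughly as follows after this commit. This is a sketch, not the full file: the import lines are an assumption based on the llama-index 0.10+ package layout (llama-index-llms-llama-cpp and llama-index-embeddings-huggingface), and ensure_model() / create_symptom_index() are helpers defined elsewhere in src/app.py (the hunk header confirms ensure_model exists there).

    # Sketch of the startup path in src/app.py after this change.
    # Imports below are assumed (llama-index 0.10+ layout); the real
    # import block sits outside this hunk.
    from llama_index.core import Settings
    from llama_index.llms.llama_cpp import LlamaCPP
    from llama_index.embeddings.huggingface import HuggingFaceEmbedding

    model_path = ensure_model()  # helper defined earlier in src/app.py

    print("\nInitializing LLM...")
    llm = LlamaCPP(
        model_path=model_path,   # local GGUF model file
        temperature=0.7,
        max_new_tokens=256,      # cap on tokens generated per call
        context_window=2048      # prompt + completion token budget
    )
    print("LLM initialized successfully")

    print("\nConfiguring settings...")
    Settings.llm = llm
    Settings.embed_model = HuggingFaceEmbedding(
        model_name="sentence-transformers/all-MiniLM-L6-v2"
    )
    print("Settings configured")

    print("\nCreating symptom index...")
    symptom_index = create_symptom_index()  # helper defined elsewhere in src/app.py
    print("Index created successfully")

These messages show up in the Space's container logs during boot, so if startup hangs it is clear which stage (LLM load, settings, or index build) is responsible. Note that if stdout is block-buffered in the container, the lines may appear late; passing flush=True to print, or setting PYTHONUNBUFFERED=1, makes them appear as each stage starts.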