boryasbora committed
Commit 67898b1 · verified · 1 Parent(s): 45c1857

Update app.py

Files changed (1)
  1. app.py +25 -10
app.py CHANGED
@@ -15,9 +15,20 @@ from langchain_core.output_parsers import StrOutputParser
  from langchain_core.runnables import RunnableLambda
  from datetime import date
  from transformers import AutoModelForCausalLM, AutoTokenizer
- from setup import download_olmo_model
- model_path = download_olmo_model()
- # import subprocess
+ from setup import download_olmo_model, OLMO_MODEL
+
+ # Ensure model is downloaded before proceeding
+ @st.cache_resource
+ def ensure_model_downloaded():
+     try:
+         model_path = download_olmo_model()
+         st.success(f"Model successfully loaded from {model_path}")
+         return model_path
+     except Exception as e:
+         st.error(f"Failed to download or locate the model: {str(e)}")
+         st.stop()
+
+ model_path = ensure_model_downloaded()  # import subprocess

  # # Define the path to your bash script
  # script_path = "./start.sh"
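
This hunk wraps the download in Streamlit's resource cache. Because Streamlit reruns the whole script on every user interaction, a bare download_olmo_model() call at module level would be re-evaluated on each rerun; @st.cache_resource runs the function once per server process and returns the cached path afterwards, while st.stop() aborts the run cleanly when the download fails. A minimal standalone sketch of the same pattern; the huggingface_hub downloader, repo id, and filename below are illustrative stand-ins, not what setup.py actually does:

import streamlit as st
from huggingface_hub import hf_hub_download

@st.cache_resource  # runs once per server process, reused across reruns
def ensure_model_downloaded():
    try:
        # Hypothetical repo id and filename, for illustration only.
        return hf_hub_download(
            repo_id="allenai/OLMo-7B-Instruct-GGUF",
            filename="olmo-7b-instruct.Q4_K_M.gguf",
        )
    except Exception as e:
        st.error(f"Failed to download or locate the model: {e}")
        st.stop()  # aborts this script run; nothing below executes

model_path = ensure_model_downloaded()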
 
@@ -90,13 +101,17 @@ def get_chain(temperature):
  retriever = load_retriever(docstore_path,chroma_path,embeddings,child_splitter,parent_splitter)

  # Replace the local OLMOLLM with the Hugging Face model
- llm = LlamaCpp(
-     model_path=str(model_path),
-     temperature=temperature,
-     max_tokens=3000,
-     verbose=False,
-     echo=False
- )
+ try:
+     llm = LlamaCpp(
+         model_path=str(model_path),
+         temperature=temperature,
+         max_tokens=3000,
+         verbose=False,
+         echo=False
+     )
+ except Exception as e:
+     st.error(f"Failed to initialize LlamaCpp: {str(e)}")
+     st.stop()
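
The second hunk applies the same defensive pattern to model loading. Constructing LlamaCpp can fail independently of the download (a truncated GGUF file, an incompatible llama-cpp-python build, insufficient memory), and the try/except surfaces that as an error in the UI instead of a crashed app. A sketch of the guarded constructor, assuming the import comes from langchain_community; the build_llm helper and the file-existence pre-check are additions for illustration, not part of this commit:

from pathlib import Path

import streamlit as st
from langchain_community.llms import LlamaCpp  # assumed import path in app.py

def build_llm(model_path, temperature):
    # Fail fast with a clear message before llama.cpp tries to load the file.
    if not Path(model_path).is_file():
        st.error(f"Model file not found: {model_path}")
        st.stop()
    try:
        return LlamaCpp(
            model_path=str(model_path),
            temperature=temperature,
            max_tokens=3000,
            verbose=False,
            echo=False,
        )
    except Exception as e:
        st.error(f"Failed to initialize LlamaCpp: {e}")
        st.stop()

# Usage inside get_chain(temperature):
#     llm = build_llm(model_path, temperature)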