Update app.py
app.py CHANGED
@@ -24,7 +24,9 @@ os.environ['LANGCHAIN_API_KEY'] = 'lsv2_pt_ce80aac3833643dd893527f566a06bf9_667d
 
 @st.cache_resource
 def load_model():
-    model_name = "bigscience/bloom-560m"
+    # model_name = "bigscience/bloom-560m"
+    model_name = "distilbert-base-uncased"
+
     tokenizer = AutoTokenizer.from_pretrained(model_name)
     model = AutoModelForCausalLM.from_pretrained(model_name)
     return model, tokenizer
@@ -137,7 +139,7 @@ def run_with_timeout(func, args, timeout):
     return result[0]
 # In your Streamlit app
 def generate_response(chain, query, context):
-    timeout_seconds =
+    timeout_seconds = 120
     result = run_with_timeout(chain.invoke, ({"question": query, "chat_history": st.session_state.messages},), timeout_seconds)
     if result is None:
         return "I apologize, but I couldn't generate a response in time. The query might be too complex for me to process quickly. Could you try simplifying your question?"
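For context on the first hunk: @st.cache_resource tells Streamlit to run load_model() once per server process and hand the same model/tokenizer pair to every rerun and session, so the checkpoint is not reloaded on each interaction. A minimal, self-contained sketch of that pattern follows. Note that "distilgpt2" here is a stand-in checkpoint chosen for illustration, not the one in this commit: distilbert-base-uncased is an encoder-only model with no causal-LM head registered in transformers, so AutoModelForCausalLM.from_pretrained would raise a ValueError for it.

import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer

@st.cache_resource  # executed once per process; later calls return the cached objects
def load_model():
    # Stand-in decoder-only checkpoint for this sketch, not the commit's model.
    model_name = "distilgpt2"
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    return model, tokenizer

model, tokenizer = load_model()  # cheap after the first call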
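The second hunk references run_with_timeout(func, args, timeout), whose body falls outside the diff context; only its final return result[0] is visible. A thread-based sketch consistent with that line, and with generate_response treating None as a timeout, might look like the following. The worker/daemon details are assumptions, not the file's actual implementation.

import threading

def run_with_timeout(func, args, timeout):
    result = [None]  # single-slot container the worker writes into

    def worker():
        result[0] = func(*args)

    thread = threading.Thread(target=worker, daemon=True)
    thread.start()
    thread.join(timeout)
    if thread.is_alive():
        # Timed out. The daemon thread keeps running in the background,
        # but the caller sees None and falls back to the apology message.
        return None
    return result[0]

With timeout_seconds = 120 as set in the new revision, generate_response waits up to two minutes for chain.invoke before returning the apology text.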