added print logs
- app.py +16 -0
- pages/Multimodal_Conversational_Search.py +0 -1
- utilities/invoke_models.py +1 -0
app.py
CHANGED
@@ -7,6 +7,8 @@ import urllib.request
 import tarfile
 from yaml.loader import SafeLoader
 import warnings
+import utilities.invoke_models as invoke_models
+import threading
 warnings.filterwarnings("ignore", category=DeprecationWarning)
 
 # Page setup
@@ -16,6 +18,20 @@ st.set_page_config(
 
 )
 
+def async_bedrock_warmup():
+    try:
+        _ = invoke_models.invoke_model_mm("hi", "none")
+        _ = invoke_models.invoke_model("hi")
+        _ = invoke_models.invoke_llm_model("hi", False)
+        st.session_state.bedrock_warmup_done = True
+        print("[Background Warmup] Done.")
+    except Exception as e:
+        print(f"[Background Warmup] Failed: {e}")
+
+if "bedrock_warmup_done" not in st.session_state:
+    threading.Thread(target=async_bedrock_warmup).start()
+
+
 st.markdown("""
 <div id="home-page">
 """, unsafe_allow_html=True)
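Note on the warmup thread added in app.py: writing to st.session_state from a bare threading.Thread usually triggers Streamlit's "missing ScriptRunContext" warning, and the flag may not reach the caller's session. A minimal sketch of one way to attach the current script run context before starting the thread, assuming a recent Streamlit release that exposes add_script_run_ctx under streamlit.runtime.scriptrunner (async_bedrock_warmup is the function from the diff above, not a library API):

import threading

import streamlit as st
from streamlit.runtime.scriptrunner import add_script_run_ctx  # recent Streamlit versions

if "bedrock_warmup_done" not in st.session_state:
    # Create the warmup thread, then attach this session's script run context
    # so st.session_state writes inside async_bedrock_warmup() land in this session.
    warmup_thread = threading.Thread(target=async_bedrock_warmup)
    add_script_run_ctx(warmup_thread)
    warmup_thread.start()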
pages/Multimodal_Conversational_Search.py
CHANGED
@@ -100,7 +100,6 @@ if "input_query" not in st.session_state:
     if(st.session_state.input_index == "covid19ie"):
         st.session_state.input_query="How many aged above 85 years died due to covid ?"#"What is the projected energy percentage from renewable sources in future?"#"Which city in United Kingdom has the highest average housing price ?"#"How many aged above 85 years died due to covid ?"# What is the projected energy from renewable sources ?"
 
-
 st.markdown("""
 <style>
 [data-testid=column]:nth-of-type(2) [data-testid=stVerticalBlock]{
utilities/invoke_models.py
CHANGED
@@ -7,6 +7,7 @@ from langchain_experimental.agents.agent_toolkits import create_pandas_dataframe
 #from langchain_core.prompts import ChatPromptTemplate
 from langchain_community.chat_models import BedrockChat
 import streamlit as st
+import threading
 #from transformers import VisionEncoderDecoderModel, ViTImageProcessor, AutoTokenizer
 #import torch
 