added print logs
RAG/rag_DocumentSearcher.py (CHANGED)
@@ -53,12 +53,12 @@ def query_(awsauth,inputs, session_id,search_types):
     # }
     # }
     }
-
+    print("started mm_search: "+st.session_state.input_index)
     path = st.session_state.input_index+"_mm/_search"
     url = host+path
     r = requests.get(url, auth=awsauth, json=query_mm, headers=headers)
     response_mm = json.loads(r.text)
-
+    print("Finished mm_search: "+st.session_state.input_index)
     hits = response_mm['hits']['hits']
     context = []
     context_tables = []
@@ -227,9 +227,11 @@ def query_(awsauth,inputs, session_id,search_types):
     "query_text": question
     }
     }}
+    print("started main search with/wo rr: "+st.session_state.input_index)
     r = requests.get(url, auth=awsauth, json=hybrid_payload, headers=headers)
     response_ = json.loads(r.text)
-    print(response_)
+    print("Finished main search with/wo rr: "+st.session_state.input_index)
+    #print(response_)
     hits = response_['hits']['hits']

     else:
@@ -341,7 +343,9 @@ def query_(awsauth,inputs, session_id,search_types):


     llm_prompt = prompt_template.format(context="\n".join(total_context[0:3]),question=question)
+    print("started LLM prompt: "+st.session_state.input_index)
     output = invoke_models.invoke_llm_model( "\n\nHuman: {input}\n\nAssistant:".format(input=llm_prompt) ,False)
+    print("Finished LLM prompt: "+st.session_state.input_index)
     if(len(images_2)==0):
         images_2 = images
     return {'text':output,'source':total_context,'image':images_2,'table':df}
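The added print pairs bracket the three slow phases of query_ (the multimodal "_mm" search, the hybrid search, and the LLM call), so the Space's container logs show where each request spends its time. A minimal sketch of the same pattern as a reusable helper, using only the Python standard library; the timed helper, its log format, and the logger name are illustrative assumptions, not part of this repo:

import logging
import time
from contextlib import contextmanager

logging.basicConfig(level=logging.INFO, format="%(asctime)s %(message)s")
logger = logging.getLogger("rag_DocumentSearcher")

@contextmanager
def timed(step, index):
    # Emit the same started/finished markers as the prints in this commit,
    # plus the elapsed wall-clock time for the wrapped block.
    logger.info("started %s: %s", step, index)
    start = time.perf_counter()
    try:
        yield
    finally:
        logger.info("Finished %s: %s (%.2fs)", step, index, time.perf_counter() - start)

With a helper like this, each instrumented block in the diff would collapse to a with timed("mm_search", st.session_state.input_index): wrapper, and the logs would gain timestamps and durations without touching the search or LLM code again.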