Update app.py
app.py CHANGED
@@ -6,8 +6,10 @@ from streamlit_drawable_canvas import st_canvas
 import torch
 from diffusers import AutoPipelineForInpainting
 import numpy as np
+from sentence_transformers import SentenceTransformer,util
 from streamlit_image_select import image_select
 import os
+import PyPDF2
 import requests
 from streamlit_navigation_bar import st_navbar
 from langchain_community.llms import Ollama
@@ -23,7 +25,7 @@ from streamlit_modal import Modal
 import streamlit.components.v1 as components
 from datetime import datetime
 from streamlit_js_eval import streamlit_js_eval
-
+from streamlit_pdf_viewer import pdf_viewer
 def consume_llm_api(prompt):
     """
     Sends a prompt to the LLM API and processes the streamed response.
@@ -45,7 +47,9 @@ def consume_llm_api(prompt):
         print(f"Error consuming API: {e}")
     except Exception as e:
         print(f"Unexpected error: {e}")
-
+def send_prompt():
+    return "please respond according to the prompt asked below from the above context"
+
 def image_to_base64(image_path):
     with open(image_path, "rb") as img_file:
         return base64.b64encode(img_file.read()).decode()
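
A note on the new helper: send_prompt() returns a fixed sentinel sentence that later hunks splice between retrieved PDF context and the user's question, and that the history renderer splits on to show only the question. A minimal round-trip sketch; the variable names and values are illustrative, not from the app:

def send_prompt():
    return "please respond according to the prompt asked below from the above context"

# Hypothetical stand-ins for a retrieved chunk and a user question.
retrieved_chunk = "Chapter 3 covers pricing tiers..."
user_prompt = "summarise the key points"

# Augmentation, as done where the prompt is submitted:
augmented = "Context: " + retrieved_chunk + "\n" + send_prompt() + "\n" + user_prompt

# Recovery for display, as done in the history loop:
shown = augmented.split(send_prompt())[-1].upper() if send_prompt() in augmented else augmented.upper()
print(shown)  # "\nSUMMARISE THE KEY POINTS" (the leading newline from the template survives)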
@@ -188,15 +192,17 @@ with column2:
     for index,prompts_ in enumerate(dictionary['every_prompt_with_val'][::-1]):
         if prompts_[-1]=="@working":
             if index==0:
-
+
+                st.write(prompts_[0].split(send_prompt())[-1].upper() if send_prompt() in prompts_[0] else prompts_[0].upper())
                 data_need=st.write_stream(consume_llm_api(prompts_[0]))
                 dictionary['every_prompt_with_val'][-1]=(prompts_[0],str(data_need))

         elif isinstance(prompts_[-1],str):
+            show_case_text=prompts_[0].split(send_prompt())[-1].upper() if send_prompt() in prompts_[0] else prompts_[0].upper()
             if index==0:
-                st.text_area(label=
+                st.text_area(label=show_case_text,value=prompts_[-1],height=500,key=str(index))
             else:
-                st.text_area(label=show_case_text,value=prompts_[-1],key=str(index))
             else:
                 st.write(prompts_[0].upper())
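
For context on the st.write_stream call above: it consumes a generator, renders chunks as they arrive, and returns the concatenated text, which is what lets the streamed answer be written back into the history tuple. A minimal sketch of that pattern, with a hypothetical stand-in generator in place of consume_llm_api:

import time
import streamlit as st

def fake_llm_stream(prompt):
    # Stand-in for consume_llm_api: yields response tokens one at a time.
    for token in ["Streamed ", "answer ", "to: ", prompt]:
        time.sleep(0.05)
        yield token

# Renders incrementally, then returns the full concatenated string.
data_need = st.write_stream(fake_llm_stream("hello"))
st.write(f"Stored answer: {data_need}")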
@@ -397,20 +403,47 @@ with column1:
         changes,implementation,current=st.columns([0.01,0.9,0.01])

         with implementation:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+            if bg_doc:
+
+                canvas_result=None
+                binary_data = bg_doc.getvalue()
+                pdf_viewer(input=binary_data,
+                           width=int(screen_width//2.3) if screen_width!=820 else screen_width//2,
+                           height=int(screen_height//2.16) if screen_height!=1180 else screen_height//2)
+                with open("temp.pdf", "wb") as f:
+                    f.write(bg_doc.getbuffer())
+
+                # Process the uploaded PDF file
+                data = process_pdf("temp.pdf")
+                text_splitter = RecursiveCharacterTextSplitter(chunk_size=7500, chunk_overlap=100)
+                chunks = text_splitter.split_documents(data)
+                # chunk_texts = [str(chunk.page_content) for chunk in chunks]
+                # print("testing",chunk_texts)
+                model_name = "all-MiniLM-L6-v2"
+                model = SentenceTransformer(model_name)
+                embeddings = [model.encode(str(chunk.page_content)) for chunk in chunks]
+
+                vector_store = []
+                for chunk, embedding in zip(chunks, embeddings):
+                    vector_store.append((embedding, chunk.page_content) )
+
+            else:
+
+
+                st.write("<br>"*5,unsafe_allow_html=True)
+                canvas_result = st_canvas(
+                    fill_color="rgba(255, 165, 0, 0.3)",  # Fixed fill color with some opacity
+                    stroke_width=stroke_width,
+                    stroke_color=stroke_color,
+                    background_color=bg_color,
+                    background_image=gen_image if gen_image else Image.open("/home/user/app/ALL_image_formation/image_gen.png"),
+                    update_streamlit=True,
+                    height=screen_height//2.16 if screen_height!=1180 else screen_height/2,
+                    width=screen_width//2.3 if screen_width!=820 else screen_width/2,
+                    drawing_mode=drawing_mode,
+                    point_display_radius=point_display_radius if drawing_mode == 'point' else 0,
+                    key="canvas",
+                )


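
This hunk is the indexing half of a small retrieval-augmented-generation loop: chunk the uploaded PDF, embed each chunk, and keep (embedding, text) pairs as an in-memory store. A self-contained sketch of the same pipeline, with two stated assumptions: plain PyPDF2 extraction stands in for the app's process_pdf helper (not shown in this diff), and the splitter import path is langchain_text_splitters:

import PyPDF2
from langchain_text_splitters import RecursiveCharacterTextSplitter
from sentence_transformers import SentenceTransformer

def extract_pdf_text(path):
    # Assumption: stands in for the app's process_pdf helper.
    with open(path, "rb") as f:
        reader = PyPDF2.PdfReader(f)
        return "\n".join(page.extract_text() or "" for page in reader.pages)

text = extract_pdf_text("temp.pdf")

# Same splitter settings as the diff: large chunks, small overlap.
splitter = RecursiveCharacterTextSplitter(chunk_size=7500, chunk_overlap=100)
chunk_texts = splitter.split_text(text)  # split_text, since we start from a str

# all-MiniLM-L6-v2 maps each chunk to a 384-dimensional vector.
model = SentenceTransformer("all-MiniLM-L6-v2")
embeddings = model.encode(chunk_texts)

# The "vector store" is just a list of (embedding, chunk text) pairs.
vector_store = list(zip(embeddings, chunk_texts))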
@@ -424,9 +457,15 @@ with column1:

         # run=st.button("run_experiment")

+        if bg_doc and prompt:
+            query_embedding = model.encode([prompt])
+            retrieved_chunks = max([(util.cos_sim(match[0],query_embedding),match[-1])for match in vector_store])[-1]
+            print(retrieved_chunks)
+            prompt = "Context: "+ retrieved_chunks +"\n"+send_prompt()+ "\n"+prompt

-
-
+            modifiedValue="@working"
+            dictionary['every_prompt_with_val'].append((prompt,modifiedValue))
+        elif not bg_doc and canvas_result.image_data is not None:
             if prompt:

                 text_or_image=multimodel_output(prompt)
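
And this is the retrieval half: embed the prompt, score every stored chunk by cosine similarity, and prepend the best chunk as context. One caveat worth noting: max over (score, text) tuples falls back to comparing the chunk strings whenever two scores tie. A sketch of the same top-1 retrieval with an explicit argmax, assuming the vector_store and model from the indexing sketch above and the send_prompt() helper added earlier in this diff:

from sentence_transformers import util

prompt = "what does the document say about pricing?"  # example query
query_embedding = model.encode([prompt])

# util.cos_sim returns a 1x1 tensor here, so unwrap with .item().
# (Assumes vector_store is non-empty, i.e. a PDF was indexed.)
scores = [util.cos_sim(embedding, query_embedding).item()
          for embedding, _ in vector_store]

# Explicit argmax over scores: never compares the chunk strings themselves.
best = max(range(len(scores)), key=scores.__getitem__)
retrieved_chunk = vector_store[best][1]

augmented = "Context: " + retrieved_chunk + "\n" + send_prompt() + "\n" + prompt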