Nielo47 committed
Commit 9b64397 · 1 Parent(s): 9fe61fb

Update space

Files changed (1)
  1. pages/main/scripts.py +22 -17
pages/main/scripts.py CHANGED
@@ -1,13 +1,15 @@
 import faiss
 import gradio as gr
-from typing import Any, Generator
+from typing import Any, Generator, Iterator
 from sentence_transformers import SentenceTransformer
-from utils.llm_response import generate_response_with_llm # The unified function now handles the RAG and LLM strategies
+from utils.llm_response import generate_response_with_llm  # The unified function now handles the RAG and LLM strategies
 from utils.phrase_extractor import process_file_content
-#from utils.report_creation import generate_report
+
+# from utils.report_creation import generate_report
 from .strings import STRINGS
+
 # DEPRECATED: this function comes back once a future OCR step is consolidated.
-#def extract_phrases_from_gradio_file(gradio_file: gr.File) -> gr.Textbox:
+# def extract_phrases_from_gradio_file(gradio_file: gr.File) -> gr.Textbox:
 #    """
 #    Utilizes the 'process_file' function from 'utils.phrase_extractor' to read the
 #    file content and extract phrases, returning them as a text block for Gradio.
@@ -18,20 +20,23 @@ from .strings import STRINGS
 #    try:
 #        # Calls the unified file-processing function, which returns a list of phrases
 #        phrases = process_file_content(gradio_file.name)
-#
+#
 #        phrases_text = "\n".join(phrases)
 #        return gr.Textbox(value=phrases_text, placeholder=STRINGS["TEXT_INPUT_PLACEHOLDER_LOADED"])
 #    except Exception as e:
 #        return gr.Textbox(value=f"Error: {e}", placeholder=STRINGS["TEXT_INPUT_PLACER_EMPTY"])
 
-def process_phrases_with_rag_llm(input_phrases_text: str, rag_docs:list[str], rag_index:faiss.Index, rag_embedder:SentenceTransformer) -> Generator[tuple[gr.Textbox, gr.Textbox, gr.Tabs, gr.TabItem]]:
+
+def process_phrases_with_rag_llm(
+    input_phrases_text: str, rag_docs: list[str], rag_index: faiss.Index, rag_embedder: SentenceTransformer
+) -> Iterator[tuple[gr.Textbox, gr.Textbox, gr.Tabs, gr.TabItem]]:
     """
     Receives a block of text (phrases separated by newlines) and processes it
     with the RAG+LLM API (`res_generate_API`) using a multiple-context strategy.
     Returns a status textbox, a formatted responses textbox, and updates tabs to switch to the results tab.
     """
-    print(f"Processando o bloco de frases para geração de resposta: \"{input_phrases_text[:100]}...\"")
-    current_symbol = " ♾️" # Emojis indicating the processing and success states
+    print(f'Processando o bloco de frases para geração de resposta: "{input_phrases_text[:100]}..."')
+    current_symbol = " ♾️"  # Emojis indicating the processing and success states
 
     # --- Action 1: switch tabs IMMEDIATELY and show the processing message ---
     # The 'yield' sends: (Status, Result, Tabs)
@@ -39,9 +44,9 @@ def process_phrases_with_rag_llm(input_phrases_text: str, rag_docs:list[str], ra
         gr.update(value=STRINGS["TXTBOX_STATUS_IDLE"], interactive=False),
         gr.update(value="", interactive=False),
         gr.update(selected=1),
-        gr.update(label=STRINGS["TAB_1_TITLE"]+current_symbol, interactive=True)
-    )
-
+        gr.update(label=STRINGS["TAB_1_TITLE"] + current_symbol, interactive=True),
+    )
+
     # time.sleep(1) # Simulates a small processing delay
 
     try:
@@ -53,16 +58,16 @@ def process_phrases_with_rag_llm(input_phrases_text: str, rag_docs:list[str], ra
             documents=rag_docs,
             index=rag_index,
             embedder=rag_embedder,
-            llm_choice='gemini', # or 'ollama', as needed
-            rag_strategy='multiple' # the key that enables the multiple-context search
+            llm_choice="gemini",  # or 'ollama', as needed
+            rag_strategy="multiple",  # the key that enables the multiple-context search
         )
 
-        # with open("./sandbox/respostateste.txt", "r", encoding="utf-8") as arquivo:
-        #     llm_response = arquivo.read() #TODO: Test Only
+        # with open("./sandbox/respostateste.txt", "r", encoding="utf-8") as arquivo:
+        #     llm_response = arquivo.read() #TODO: Test Only
 
         status_message = STRINGS["TXTBOX_STATUS_OK"]
         formatted_output = f"--- Resposta Fornecida pela LLM ---\n{llm_response}\n"
-        current_symbol = " ✅"
+        current_symbol = " ✅"
 
     except Exception as e:
         status_message = STRINGS["TXTBOX_STATUS_ERROR"]
@@ -75,5 +80,5 @@ def process_phrases_with_rag_llm(input_phrases_text: str, rag_docs:list[str], ra
         gr.update(value=status_message, interactive=False),
         gr.update(value=formatted_output, interactive=False),
         gr.update(),
-        gr.update(label=STRINGS["TAB_1_TITLE"]+current_symbol, interactive=True)
+        gr.update(label=STRINGS["TAB_1_TITLE"] + current_symbol, interactive=True),
     )
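
Beyond the formatter-style changes (double quotes, trailing commas, two spaces before inline comments, a wrapped signature), the substantive change in this diff is the return annotation: `Generator[tuple[...]]` becomes `Iterator[tuple[...]]`. Before Python 3.13, `typing.Generator` requires all three type parameters (yield, send, return), so the old single-argument form raises a TypeError when the module is imported; `Iterator[YieldType]` is the idiomatic annotation for a generator that only yields. A minimal sketch of the two equivalent spellings, not code from this repository:

from collections.abc import Iterator
from typing import Generator


def handler() -> Iterator[tuple[str, str]]:
    # Yield-type-only form, as used in this commit.
    yield "processing", ""
    yield "done", "result"


def handler_explicit() -> Generator[tuple[str, str], None, None]:
    # Fully spelled-out form: yield type, send type, return type.
    yield "processing", ""
    yield "done", "result"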
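
Because `process_phrases_with_rag_llm` is a generator handler, Gradio streams each yielded tuple into the event's output components in order: first the "processing" state (status and results cleared, tab switched, tab label suffixed with ♾️), then the final state after the RAG+LLM call. Below is a minimal wiring sketch under stated assumptions: the component names and Blocks layout are hypothetical, the import path is derived only from the file path shown above, and the real labels come from STRINGS rather than the literals used here.

import gradio as gr

from pages.main.scripts import process_phrases_with_rag_llm  # handler from this commit

with gr.Blocks() as demo:
    # Hypothetical session state for the RAG corpus, FAISS index, and embedder,
    # assumed to be populated by an earlier loading step elsewhere in the Space.
    rag_docs_state = gr.State([])
    rag_index_state = gr.State(None)
    rag_embedder_state = gr.State(None)

    with gr.Tabs() as tabs:
        with gr.TabItem("Input", id=0):
            phrases_box = gr.Textbox(lines=10, label="Phrases (one per line)")
            run_btn = gr.Button("Process")
        with gr.TabItem("Results", id=1) as results_tab:  # gr.update(selected=1) targets this id
            status_box = gr.Textbox(label="Status", interactive=False)
            output_box = gr.Textbox(label="LLM response", interactive=False)

    # The four outputs line up with the four gr.update(...) values the handler yields:
    # status textbox, results textbox, the Tabs container, and the results TabItem label.
    run_btn.click(
        process_phrases_with_rag_llm,
        inputs=[phrases_box, rag_docs_state, rag_index_state, rag_embedder_state],
        outputs=[status_box, output_box, tabs, results_tab],
    )

if __name__ == "__main__":
    demo.launch()

Which tab TAB_1_TITLE actually labels is not visible in this diff, so wiring the results TabItem as the fourth output is only a guess at the intended target.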