# CHATVLD / app.py — FRANCISCOFALT, commit 2ca3e3a (verified), 8.04 kB
# ================================
# ChatVLD Futurista - Gradio 5.x
# ================================
import os
from pathlib import Path
import requests
import gradio as gr
import time
from langchain_groq import ChatGroq
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.messages import AIMessage, HumanMessage
from langchain_community.document_loaders import PyMuPDFLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.chains import create_history_aware_retriever, create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
# ================================
# API KEY
# ================================
# Fail fast at startup when the Groq credential is missing from the environment.
if os.environ.get("GROQ_API_KEY") is None:
    raise ValueError("❌ A variável de ambiente GROQ_API_KEY não está definida.")
# ================================
# PDFs
# ================================
# Source documents for the RAG index, downloaded once from Google Drive.
pdf_urls = {
    "documento.pdf": "https://drive.google.com/uc?id=1hin1AVM6vh0adcqH0RBOmEXZrn_Ebfyk",
    "documento2.pdf": "https://drive.google.com/uc?id=1GVbPq8SDriIS5CQo0kT0EZEqwWwjGJmY"
}
for pdf_path, pdf_url in pdf_urls.items():
    # Skip files already present on disk so restarts don't re-download.
    if not Path(pdf_path).exists():
        # timeout= keeps startup from hanging forever on a dead link;
        # raise_for_status() surfaces HTTP errors instead of silently
        # saving an HTML error page under a .pdf name.
        r = requests.get(pdf_url, timeout=60)
        r.raise_for_status()
        with open(pdf_path, "wb") as f:
            f.write(r.content)
# ================================
# CONFIG
# ================================
ID_MODEL = "deepseek-r1-distill-llama-70b"  # Groq-hosted chat model id
TEMPERATURE = 0.7  # LLM sampling temperature
FAISS_INDEX_PATH = "index_faiss"  # on-disk location of the persisted FAISS index
# ================================
# FUNÇÕES
# ================================
def load_llm(model_id, temperature):
    """Build and return a ChatGroq client for the given model and temperature."""
    settings = {
        "model": model_id,
        "temperature": temperature,
        # Key presence is validated at startup; read it here per client.
        "groq_api_key": os.environ["GROQ_API_KEY"],
        "max_tokens": None,
        "timeout": None,
        "max_retries": 2,
    }
    return ChatGroq(**settings)
def extract_text_pdf(file_path):
    """Load a PDF and return the text of all its pages joined by newlines."""
    pages = PyMuPDFLoader(file_path).load()
    return "\n".join(page.page_content for page in pages)
def config_retriever(pdf_files):
    """Build (or load from disk) a FAISS retriever over the given PDFs.

    Args:
        pdf_files: iterable of local PDF paths to index when no saved
            index exists yet.

    Returns:
        A retriever using MMR search (k=3 results from fetch_k=4 candidates).
    """
    embeddings = HuggingFaceEmbeddings(model_name="BAAI/bge-m3")
    if Path(FAISS_INDEX_PATH).exists():
        # NOTE(review): a persisted index is reused even if the PDF set
        # changes — delete FAISS_INDEX_PATH to force a rebuild.
        vectorstore = FAISS.load_local(FAISS_INDEX_PATH, embeddings, allow_dangerous_deserialization=True)
    else:
        # join() avoids the quadratic cost of repeated string +=;
        # result is identical: each document's text followed by "\n".
        all_texts = "\n".join(extract_text_pdf(file_path) for file_path in pdf_files) + "\n"
        text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
        chunks = text_splitter.split_text(all_texts)
        vectorstore = FAISS.from_texts(chunks, embeddings)
        vectorstore.save_local(FAISS_INDEX_PATH)
    return vectorstore.as_retriever(search_type='mmr', search_kwargs={'k': 3, 'fetch_k': 4})
def config_rag_chain(llm, retriever):
    """Assemble a history-aware RAG chain: condense question, retrieve, answer."""
    # Prompt that folds the chat history and new question into a standalone query.
    condense_prompt = ChatPromptTemplate.from_messages([
        ("system", "Dada a conversa e a pergunta, formule uma pergunta independente."),
        MessagesPlaceholder("chat_history"),
        ("human", "Pergunta: {input}"),
    ])
    history_retriever = create_history_aware_retriever(
        llm=llm,
        retriever=retriever,
        prompt=condense_prompt,
    )
    # Prompt used to answer from the retrieved context.
    answer_prompt = ChatPromptTemplate.from_messages([
        ("system", """Você é um assistente virtual futurista da empresa VLD. Responda de forma clara e objetiva em português. Se não souber, diga que não sabe."""),
        MessagesPlaceholder("chat_history"),
        ("human", "Pergunta: {input}\n\nContexto: {context}"),
    ])
    answer_chain = create_stuff_documents_chain(llm, answer_prompt)
    return create_retrieval_chain(history_retriever, answer_chain)
# Module-level singletons built once at startup: the LLM client, the
# retriever over the bundled PDFs, and the assembled RAG chain.
llm = load_llm(ID_MODEL, TEMPERATURE)
retriever_instance = config_retriever(list(pdf_urls.keys()))
rag_chain = config_rag_chain(llm, retriever_instance)
# Shared conversation history, seeded with the assistant's greeting.
chat_history = [AIMessage(content="🚀 Olá, sou o seu suporte virtual futurista! Como posso te ajudar?")]
def responder(pergunta):
    """Answer a user question via the RAG chain, updating the shared history.

    The deepseek-r1 model may emit a <think>...</think> reasoning block;
    only the text after the final </think> tag is kept.
    """
    chat_history.append(HumanMessage(content=pergunta))
    try:
        result = rag_chain.invoke({"input": pergunta, "chat_history": chat_history})
        # rpartition yields everything after the last "</think>" tag,
        # or the whole string when the tag is absent.
        resposta = result["answer"].rpartition("</think>")[2].strip()
    except Exception as e:
        resposta = f"❌ Erro: {str(e)}"
    chat_history.append(AIMessage(content=resposta))
    return resposta
# ================================
# FUTURISTIC CSS
# ================================
# Injected via gr.Blocks(css=...): styles the chatbot, textbox and buttons,
# and animates the main title (#titulo-principal) with a scrolling gradient.
# The string below is runtime data passed to Gradio verbatim.
custom_css = """
.gradio-chatbot { background-color: #f8f9fa; color: #111; }
.gradio-textbox textarea { background-color: #1c1c1c; color: #fff; border-radius: 8px; border: 1px solid #333; padding: 8px; }
.gradio-button, .gradio-button:active { background: linear-gradient(to right, #00c6ff, #0072ff); color: #fff; border: none; border-radius: 8px; }
/* 🎇 Estilo futurista para o título principal */
#titulo-principal {
text-align: center;
font-size: 32px;
font-weight: bold;
background: linear-gradient(90deg, #00c6ff, #8a2be2, #ff0080, #00c6ff);
-webkit-background-clip: text;
-webkit-text-fill-color: transparent;
animation: brilho 5s linear infinite;
margin-bottom: 20px;
}
@keyframes brilho {
0% { background-position: 0% 50%; }
50% { background-position: 100% 50%; }
100% { background-position: 0% 50%; }
}
"""
# ================================
# GRADIO 5.x INTERFACE
# ================================
with gr.Blocks(css=custom_css, theme="soft") as iface:
    # Main animated title (styled by #titulo-principal in custom_css).
    gr.Markdown("🛸 SUPORTE AUTOMATIZADO", elem_id="titulo-principal")
    with gr.Tabs():
        # ====================
        # Futuristic chat tab
        # ====================
        with gr.TabItem("💭 ChatVLD"):
            gr.Markdown("""🤖 **Olá!**
Sou o suporte para as impressoras **Codeline SS5632** e **LINX 7900**.
Em que posso ajudar?""")
            # "messages" type expects a list of {"role": ..., "content": ...} dicts.
            chatbot = gr.Chatbot(type="messages")
            with gr.Row():
                txt = gr.Textbox(
                    placeholder="Diz o que tu quer macho...",
                    show_label=False,
                    lines=2
                )
                submit_btn = gr.Button("🚀 Arrocha")
            with gr.Row():
                clear_btn = gr.Button("🧹 Barrer a prosa")
                new_chat_btn = gr.Button("✨ Nova prosa")

            def enviar(msg, history):
                # Streaming handler: echo the user message, show a typing
                # placeholder, then replace it with the real answer.
                history.append({"role": "user", "content": msg})
                yield history, ""
                history.append({"role": "assistant", "content": "🤖 Digitando..."})
                yield history, ""
                resposta = responder(msg)
                # Overwrite the placeholder entry with the model's reply.
                history[-1] = {"role": "assistant", "content": resposta}
                yield history, ""

            def limpar():
                # Clear both the visible chat and the LLM-side history.
                chat_history.clear()
                return [], ""

            def novo_chat():
                # Reset the LLM-side history and restart with a fresh greeting.
                chat_history.clear()
                chat_history.append(AIMessage(content="🤖 Novo chat iniciado. Como posso te ajudar?"))
                return [{"role": "assistant", "content": "🤖 Novo chat iniciado. Como posso te ajudar?"}], ""

            # Enter key and button both trigger the same streaming handler.
            txt.submit(enviar, [txt, chatbot], [chatbot, txt])
            submit_btn.click(enviar, [txt, chatbot], [chatbot, txt])
            clear_btn.click(limpar, outputs=[chatbot, txt])
            new_chat_btn.click(novo_chat, outputs=[chatbot, txt])
        # ====================
        # VALID NODE tab with a fixed link button
        # ====================
        with gr.TabItem("🌐 VALIDNODE"):
            gr.Markdown("### Acesse o VALID NODE clicando no botão abaixo:")
            gr.HTML(
                '<button onclick="window.open(\'https://172.17.200.97\', \'_blank\')" '
                'style="background: linear-gradient(to right, #00c6ff, #0072ff); '
                'color: #fff; border: none; border-radius: 8px; padding: 10px 20px; '
                'font-size: 16px; cursor: pointer;">🖥️ VALIDeNODE</button>'
            )
iface.launch()