# ================================
# ChatVLD Futurista - Gradio 5.x
# ================================
# RAG chatbot over printer manuals (PDFs on Google Drive): builds one FAISS
# index per printer, answers questions via a Groq-hosted DeepSeek model with
# a history-aware retrieval chain, and serves a Gradio Blocks UI.
import os
from pathlib import Path

import requests
import gradio as gr
import time  # NOTE(review): imported but unused — kept to avoid breaking external expectations

from langchain_groq import ChatGroq
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.messages import AIMessage, HumanMessage
from langchain_community.document_loaders import PyMuPDFLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.chains import create_history_aware_retriever, create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain

# ================================
# API KEY
# ================================
# Fail fast: the Groq client below requires this variable.
if "GROQ_API_KEY" not in os.environ:
    raise ValueError("❌ A variável de ambiente GROQ_API_KEY não está definida.")

# ================================
# PDFs
# ================================
# Printer name -> direct-download Google Drive URL. The name doubles as the
# local PDF filename and the FAISS index directory suffix.
pdf_urls = {
    "Codeline SS5632": "https://drive.google.com/uc?id=1s1OPWbxxu8ADBQBjmTfPe1tj-aLcEEIH",
    "Linx 7900": "https://drive.google.com/uc?id=1GVbPq8SDriIS5CQo0kT0EZEqwWwjGJmY",
}

for pdf_name, pdf_url in pdf_urls.items():
    pdf_path = Path(f"{pdf_name}.pdf")
    if not pdf_path.exists():
        # BUGFIX: add a timeout and check the HTTP status so a failed download
        # does not silently write an HTML error page to disk as the "PDF".
        r = requests.get(pdf_url, timeout=60)
        r.raise_for_status()
        pdf_path.write_bytes(r.content)

# ================================
# CONFIG
# ================================
ID_MODEL = "deepseek-r1-distill-llama-70b"
TEMPERATURE = 0.7

# ================================
# FUNÇÕES
# ================================
def load_llm(model_id, temperature):
    """Return a ChatGroq LLM client configured for this app.

    Args:
        model_id: Groq model identifier (e.g. ``ID_MODEL``).
        temperature: sampling temperature.
    """
    return ChatGroq(
        model=model_id,
        temperature=temperature,
        groq_api_key=os.environ["GROQ_API_KEY"],
        max_tokens=None,
        timeout=None,
        max_retries=2,
    )


def extract_text_pdf(file_path):
    """Extract the full plain text of a PDF, pages joined by newlines."""
    loader = PyMuPDFLoader(file_path)
    docs = loader.load()
    return "\n".join(page.page_content for page in docs)


def config_retriever(pdf_files, nome_impressora):
    """Build (or load from disk) a FAISS retriever for one printer's PDFs.

    The index is persisted under ``index_faiss_<printer_name>`` so subsequent
    startups skip the embedding step.

    Args:
        pdf_files: list of local PDF paths to index.
        nome_impressora: printer name; used to derive a unique index path.

    Returns:
        An MMR retriever over the printer's document chunks.
    """
    embeddings = HuggingFaceEmbeddings(model_name="BAAI/bge-m3")
    # One index per printer so the knowledge bases never mix.
    faiss_path = f"index_faiss_{nome_impressora.replace(' ', '_')}"
    if Path(faiss_path).exists():
        # allow_dangerous_deserialization is safe here: the index was written
        # by this same process/user, not downloaded from an untrusted source.
        vectorstore = FAISS.load_local(
            faiss_path, embeddings, allow_dangerous_deserialization=True
        )
    else:
        all_texts = "\n".join(extract_text_pdf(fp) for fp in pdf_files) + "\n"
        text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
        chunks = text_splitter.split_text(all_texts)
        vectorstore = FAISS.from_texts(chunks, embeddings)
        vectorstore.save_local(faiss_path)
    return vectorstore.as_retriever(search_type="mmr", search_kwargs={"k": 3, "fetch_k": 4})


def config_rag_chain(llm, retriever):
    """Assemble the history-aware RAG chain (contextualize -> retrieve -> answer)."""
    # Step 1: rewrite the user question into a standalone question using history.
    context_q_prompt = ChatPromptTemplate.from_messages([
        ("system", "Dada a conversa e a pergunta, formule uma pergunta independente."),
        MessagesPlaceholder("chat_history"),
        ("human", "Pergunta: {input}"),
    ])
    hist_aware_retriever = create_history_aware_retriever(
        llm=llm, retriever=retriever, prompt=context_q_prompt
    )

    # Step 2: answer from the retrieved context.
    system_prompt = """Você é um assistente virtual futurista da empresa VLD. Responda de forma clara e objetiva em português. Se não souber, diga que não sabe."""
    qa_prompt = ChatPromptTemplate.from_messages([
        ("system", system_prompt),
        MessagesPlaceholder("chat_history"),
        ("human", "Pergunta: {input}\n\nContexto: {context}"),
    ])
    qa_chain = create_stuff_documents_chain(llm, qa_prompt)
    return create_retrieval_chain(hist_aware_retriever, qa_chain)


# ================================
# VARIÁVEIS GLOBAIS
# ================================
llm = load_llm(ID_MODEL, TEMPERATURE)
retrievers_cache = {}
chains_cache = {}
# Shared conversation memory fed into the chain on every turn.
chat_history = [AIMessage(content="🚀 Olá, sou o seu suporte virtual futurista! Como posso te ajudar?")]

# ================================
# PRÉ-CARREGAR TODOS OS ÍNDICES
# ================================
for nome in pdf_urls:
    retrievers_cache[nome] = config_retriever([f"{nome}.pdf"], nome)
    chains_cache[nome] = config_rag_chain(llm, retrievers_cache[nome])

# Initial default: the first printer declared in pdf_urls (was hard-coded).
current_chain = chains_cache[next(iter(pdf_urls))]

# ================================
# SELEÇÃO DE IMPRESSORA
# ================================
def set_impressora(nome_impressora):
    """Switch the active RAG chain to the selected printer; return a status line."""
    global current_chain
    current_chain = chains_cache[nome_impressora]
    return f"📂 Impressora selecionada: {nome_impressora}"


# ================================
# FUNÇÃO DE RESPOSTA
# ================================
def responder(pergunta):
    """Answer one user question with the active chain, updating shared history.

    Returns the assistant's reply, or an error string if the chain fails.
    """
    global current_chain
    if current_chain is None:
        return "⚠️ Por favor, escolha primeiro a impressora que deseja consultar."

    chat_history.append(HumanMessage(content=pergunta))
    try:
        resposta = current_chain.invoke(
            {"input": pergunta, "chat_history": chat_history}
        )["answer"]
        # BUGFIX: the reasoning model wraps its chain-of-thought in
        # <think>...</think>; keep only the text after the closing tag.
        # (The original code split on an empty string, which always raised
        # "ValueError: empty separator" and turned every answer into an error.)
        if "</think>" in resposta:
            resposta = resposta.split("</think>")[-1].strip()
        else:
            resposta = resposta.strip()
    except Exception as e:
        resposta = f"❌ Erro: {str(e)}"

    chat_history.append(AIMessage(content=resposta))
    return resposta


# ================================
# CSS FUTURISTA
# ================================
custom_css = """
.gradio-chatbot { background-color: #f8f9fa; color: #111; }
.gradio-textbox textarea {
    background-color: #1c1c1c;
    color: #fff;
    border-radius: 8px;
    border: 1px solid #333;
    padding: 8px;
}
.gradio-button, .gradio-button:active {
    background: linear-gradient(to right, #00c6ff, #0072ff);
    color: #fff;
    border: none;
    border-radius: 8px;
}
/* 🎇 Estilo futurista para o título principal */
#titulo-principal {
    text-align: center;
    font-size: 40px;
    font-weight: bold;
    background: linear-gradient(90deg, #00c6ff, #8a2be2, #ff0080, #00c6ff);
    -webkit-background-clip: text;
    -webkit-text-fill-color: transparent;
    animation: brilho 5s linear infinite;
    margin-bottom: 20px;
}
@keyframes brilho {
    0% { background-position: 0% 50%; }
    50% { background-position: 100% 50%; }
    100% { background-position: 0% 50%; }
}
"""

# ================================
# INTERFACE GRADIO 5.x
# ================================
with gr.Blocks(css=custom_css, theme="soft") as iface:
    # TÍTULO PRINCIPAL
    gr.Markdown("🛸 SUPORTE AUTOMATIZADO", elem_id="titulo-principal")

    with gr.Tabs():
        # ====================
        # Aba Chat Futurista
        # ====================
        with gr.TabItem("💭 CHATVLD"):
            gr.Markdown("""🤖 **Olá!** Sou o suporte para as impressoras **Codeline SS5632** e **LINX 7900**.""")
            gr.Markdown("📌 **Por gentileza, escolha a impressora que deseja consultar.**")

            impressora_select = gr.Dropdown(
                choices=list(pdf_urls.keys()),
                label="Selecione a impressora",
                value="Codeline SS5632",  # preloaded above
            )
            status_box = gr.Textbox(label="Status", interactive=False)

            def troca_impressora(nome_impressora):
                # BUGFIX: the original returned both strings at once to the
                # same output, so the "loading" message never appeared.
                # Yielding twice shows the progress message, then the result.
                yield "⏳ Montando base de conhecimento, aguarde..."
                yield set_impressora(nome_impressora)

            impressora_select.change(
                fn=troca_impressora,
                inputs=impressora_select,
                outputs=status_box,
            )

            chatbot = gr.Chatbot(type="messages")

            with gr.Row():
                txt = gr.Textbox(
                    placeholder="Diz o que tu quer macho...",
                    show_label=False,
                    lines=2,
                )
                submit_btn = gr.Button("🚀 Arrocha")

            with gr.Row():
                clear_btn = gr.Button("🧹 Barrer a prosa")
                new_chat_btn = gr.Button("✨ Nova prosa")

            def enviar(msg, history):
                """Stream the exchange: echo user msg, show a typing stub, then the answer."""
                history.append({"role": "user", "content": msg})
                yield history, ""
                history.append({"role": "assistant", "content": "🤖 Digitando..."})
                yield history, ""
                resposta = responder(msg)
                history[-1] = {"role": "assistant", "content": resposta}
                yield history, ""

            def limpar():
                """Clear both the visible chat and the shared LLM history."""
                chat_history.clear()
                return [], ""

            def novo_chat():
                """Reset the conversation with a fresh greeting."""
                chat_history.clear()
                chat_history.append(AIMessage(content="🤖 Novo chat iniciado. Como posso te ajudar?"))
                return [{"role": "assistant", "content": "🤖 Novo chat iniciado. Como posso te ajudar?"}], ""

            txt.submit(enviar, [txt, chatbot], [chatbot, txt])
            submit_btn.click(enviar, [txt, chatbot], [chatbot, txt])
            clear_btn.click(limpar, outputs=[chatbot, txt])
            new_chat_btn.click(novo_chat, outputs=[chatbot, txt])

        # ====================
        # Aba VALID NODE com botão fixo
        # ====================
        with gr.TabItem("🌐 VALID N.O.D.E"):
            gr.Markdown("### Acesse o VALID NODE clicando no botão abaixo:")
            # NOTE(review): the original HTML payload is empty — the button
            # markup appears to have been lost; restore the link/button here.
            gr.HTML("")

iface.launch()