# ================================
# ChatVLD Futurista - Gradio 5.x
# ================================

import os
from pathlib import Path

import requests
import gradio as gr

from langchain_groq import ChatGroq
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.messages import AIMessage, HumanMessage
from langchain_community.document_loaders import PyMuPDFLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_huggingface import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain.chains import create_history_aware_retriever, create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain

# ================================
# API KEY
# ================================
if "GROQ_API_KEY" not in os.environ:
    raise ValueError("❌ The GROQ_API_KEY environment variable is not set.")

# ================================
# PDFs
# ================================
pdf_urls = {
    "documento.pdf": "https://drive.google.com/uc?id=1hin1AVM6vh0adcqH0RBOmEXZrn_Ebfyk",
    "documento2.pdf": "https://drive.google.com/uc?id=1GVbPq8SDriIS5CQo0kT0EZEqwWwjGJmY"
}

# Download each PDF once; skip files that already exist locally.
for pdf_path, pdf_url in pdf_urls.items():
    if not Path(pdf_path).exists():
        r = requests.get(pdf_url, timeout=60)
        r.raise_for_status()
        with open(pdf_path, "wb") as f:
            f.write(r.content)
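
# Optional sanity check (a sketch beyond the original flow): Google Drive can
# return an HTML interstitial instead of the binary for some files, so verify
# the PDF magic bytes before building the index.
for pdf_path in pdf_urls:
    with open(pdf_path, "rb") as f:
        if f.read(5) != b"%PDF-":
            raise ValueError(f"{pdf_path} does not look like a valid PDF download.")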

# ================================
# CONFIG
# ================================
ID_MODEL = "deepseek-r1-distill-llama-70b"  # reasoning model; its <think> block is stripped in responder()
TEMPERATURE = 0.7
FAISS_INDEX_PATH = "index_faiss"

# ================================
# FUNCTIONS
# ================================
def load_llm(model_id, temperature):
    """Instantiate the Groq-hosted chat model."""
    return ChatGroq(
        model=model_id,
        temperature=temperature,
        groq_api_key=os.environ["GROQ_API_KEY"],
        max_tokens=None,
        timeout=None,
        max_retries=2,
    )

def extract_text_pdf(file_path):
    """Extract the plain text of every page in a PDF."""
    loader = PyMuPDFLoader(file_path)
    docs = loader.load()
    return "\n".join(page.page_content for page in docs)

def config_retriever(pdf_files):
    """Build (or load from disk) a FAISS index over the PDFs and return an MMR retriever."""
    embeddings = HuggingFaceEmbeddings(model_name="BAAI/bge-m3")
    if Path(FAISS_INDEX_PATH).exists():
        # Reuse the persisted index; the flag is required by FAISS.load_local for pickled data.
        vectorstore = FAISS.load_local(FAISS_INDEX_PATH, embeddings, allow_dangerous_deserialization=True)
    else:
        # First run: extract, chunk, and embed the PDFs, then persist the index.
        all_texts = ""
        for file_path in pdf_files:
            all_texts += extract_text_pdf(file_path) + "\n"
        text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
        chunks = text_splitter.split_text(all_texts)
        vectorstore = FAISS.from_texts(chunks, embeddings)
        vectorstore.save_local(FAISS_INDEX_PATH)
    # MMR trades pure relevance for diversity: fetch 4 candidates, return the best 3.
    return vectorstore.as_retriever(search_type="mmr", search_kwargs={"k": 3, "fetch_k": 4})
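
# Debugging sketch (hypothetical query): inspect raw retriever output before
# wiring it into the chain. LangChain retrievers are Runnables, so .invoke works:
# docs = config_retriever(list(pdf_urls.keys())).invoke("limpeza do cabeçote")
# for d in docs:
#     print(d.page_content[:120])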

def config_rag_chain(llm, retriever):
    """Wire a history-aware retriever into a stuff-documents QA chain."""
    # Step 1: rewrite the user's question into a standalone query using the chat history.
    context_q_prompt = ChatPromptTemplate.from_messages([
        ("system", "Dada a conversa e a pergunta, formule uma pergunta independente."),
        MessagesPlaceholder("chat_history"),
        ("human", "Pergunta: {input}"),
    ])
    hist_aware_retriever = create_history_aware_retriever(
        llm=llm,
        retriever=retriever,
        prompt=context_q_prompt,
    )

    # Step 2: answer the rewritten question using the retrieved chunks as context.
    system_prompt = """Você é um assistente virtual futurista da empresa VLD. Responda de forma clara e objetiva em português. Se não souber, diga que não sabe."""

    qa_prompt = ChatPromptTemplate.from_messages([
        ("system", system_prompt),
        MessagesPlaceholder("chat_history"),
        ("human", "Pergunta: {input}\n\nContexto: {context}"),
    ])
    qa_chain = create_stuff_documents_chain(llm, qa_prompt)
    return create_retrieval_chain(hist_aware_retriever, qa_chain)
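
# Data flow, roughly: (question, history) -> standalone question -> retriever
# -> top-k chunks -> "stuff" prompt -> LLM answer. The chain's output dict
# includes the retrieved documents under "context" and the reply under "answer".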

# Build everything once at startup; the chat history is shared across turns.
llm = load_llm(ID_MODEL, TEMPERATURE)
retriever_instance = config_retriever(list(pdf_urls.keys()))
rag_chain = config_rag_chain(llm, retriever_instance)

chat_history = [AIMessage(content="🚀 Olá, sou o seu suporte virtual futurista! Como posso te ajudar?")]

def responder(pergunta):
    """Run one RAG turn and record both sides in the shared history."""
    chat_history.append(HumanMessage(content=pergunta))
    try:
        resposta = rag_chain.invoke({"input": pergunta, "chat_history": chat_history})["answer"]
        # DeepSeek-R1 prepends its reasoning inside <think>...</think>; keep only the final answer.
        resposta = resposta.split("</think>")[-1].strip() if "</think>" in resposta else resposta.strip()
    except Exception as e:
        resposta = f"❌ Erro: {e}"
    chat_history.append(AIMessage(content=resposta))
    return resposta
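
# Smoke-test sketch (hypothetical question; assumes the index and API key are ready):
# print(responder("Como troco o cartucho da LINX 7900?"))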

# ================================
# FUTURISTIC CSS
# ================================
custom_css = """
.gradio-chatbot { background-color: #f8f9fa; color: #111; }
.gradio-textbox textarea { background-color: #1c1c1c; color: #fff; border-radius: 8px; border: 1px solid #333; padding: 8px; }
.gradio-button, .gradio-button:active { background: linear-gradient(to right, #00c6ff, #0072ff); color: #fff; border: none; border-radius: 8px; }

/* 🎇 Futuristic style for the main title */
#titulo-principal {
    text-align: center;
    font-size: 32px;
    font-weight: bold;
    background: linear-gradient(90deg, #00c6ff, #8a2be2, #ff0080, #00c6ff);
    background-size: 200% auto; /* must exceed 100% or the background-position animation has no visible effect */
    -webkit-background-clip: text;
    -webkit-text-fill-color: transparent;
    animation: brilho 5s linear infinite;
    margin-bottom: 20px;
}

@keyframes brilho {
    0% { background-position: 0% 50%; }
    50% { background-position: 100% 50%; }
    100% { background-position: 0% 50%; }
}
"""

# ================================
# INTERFACE GRADIO 5.x
# ================================
with gr.Blocks(css=custom_css, theme="soft") as iface:
    # MAIN TITLE
    gr.Markdown("🛸 SUPORTE AUTOMATIZADO", elem_id="titulo-principal")

    with gr.Tabs():
        # ====================
        # Futuristic Chat tab
        # ====================
        with gr.TabItem("💭 ChatVLD"):
            gr.Markdown("""🤖 **Olá!**  
            Sou o suporte para as impressoras **Codeline SS5632** e **LINX 7900**.  
            Em que posso ajudar?""")

            chatbot = gr.Chatbot(type="messages")

            with gr.Row():
                txt = gr.Textbox(
                    placeholder="Diz o que tu quer macho...",
                    show_label=False,
                    lines=2
                )
                submit_btn = gr.Button("🚀 Arrocha")

            with gr.Row():
                clear_btn = gr.Button("🧹 Barrer a prosa")
                new_chat_btn = gr.Button("✨ Nova prosa")

            def enviar(msg, history):
                """Stream the turn: echo the user, show a typing stub, then swap in the answer."""
                # Ignore empty submissions instead of sending them to the LLM.
                if not msg.strip():
                    yield history, ""
                    return

                history.append({"role": "user", "content": msg})
                yield history, ""

                # Temporary placeholder while the RAG chain runs.
                history.append({"role": "assistant", "content": "🤖 Digitando..."})
                yield history, ""

                resposta = responder(msg)
                history[-1] = {"role": "assistant", "content": resposta}
                yield history, ""

            def limpar():
                # Wipe both the visible transcript and the LangChain history.
                chat_history.clear()
                return [], ""

            def novo_chat():
                # Reset the history and greet the user again.
                chat_history.clear()
                chat_history.append(AIMessage(content="🤖 Novo chat iniciado. Como posso te ajudar?"))
                return [{"role": "assistant", "content": "🤖 Novo chat iniciado. Como posso te ajudar?"}], ""

            # Enter and the send button share the same streaming handler.
            txt.submit(enviar, [txt, chatbot], [chatbot, txt])
            submit_btn.click(enviar, [txt, chatbot], [chatbot, txt])
            clear_btn.click(limpar, outputs=[chatbot, txt])
            new_chat_btn.click(novo_chat, outputs=[chatbot, txt])

        # ====================
        # VALID NODE tab with a fixed link button
        # ====================
        with gr.TabItem("🌐 VALIDNODE"):
            gr.Markdown("### Acesse o VALID NODE clicando no botão abaixo:")
            gr.HTML(
                '<button onclick="window.open(\'https://172.17.200.97\', \'_blank\')" '
                'style="background: linear-gradient(to right, #00c6ff, #0072ff); '
                'color: #fff; border: none; border-radius: 8px; padding: 10px 20px; '
                'font-size: 16px; cursor: pointer;">🖥️ VALID NODE</button>'
            )

iface.launch()
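
# Aside: iface.launch(share=True) would additionally create a temporary public
# Gradio URL, which can help when testing from another machine.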