|
import gradio as gr |
|
from langchain_community.vectorstores import FAISS |
|
from langchain_community.embeddings import HuggingFaceEmbeddings |
|
from openai import OpenAI |
|
import os |
|
import zipfile |
|
|
|
|
|
# Locate the OpenAI credential in the process environment and fail fast,
# at import time, when it is missing — the app is useless without it.
api_key = os.environ.get("OPENAI_API_KEY")
if not api_key:
    raise ValueError("Chiave API OpenAI non trovata. Assicurati di aver impostato OPENAI_API_KEY.")

# Single shared OpenAI client reused by every request handler below.
client = OpenAI(api_key=api_key)
|
|
|
|
|
zip_path_m = "faiss_manual_index.zip" |
|
faiss_manual_index = "faiss_manual_index" |
|
|
|
|
|
if not os.path.exists(faiss_manual_index): |
|
os.makedirs(faiss_manual_index) |
|
|
|
|
|
zip_path_p = "faiss_problems_index.zip" |
|
faiss_problems_index = "faiss_problems_index" |
|
|
|
|
|
if not os.path.exists(faiss_problems_index): |
|
os.makedirs(faiss_problems_index) |
|
|
|
|
|
if os.path.exists(zip_path_m): |
|
with zipfile.ZipFile(zip_path_m, 'r') as zip_ref: |
|
zip_ref.extractall(faiss_manual_index) |
|
print(f"Files estratti nella directory: {faiss_manual_index}") |
|
else: |
|
print(f"File {zip_path_m} non trovato. Assicurati di caricarlo nello Space.") |
|
|
|
|
|
if os.path.exists(zip_path_p): |
|
with zipfile.ZipFile(zip_path_p, 'r') as zip_ref: |
|
zip_ref.extractall(faiss_problems_index) |
|
print(f"Files estratti nella directory: {faiss_problems_index}") |
|
else: |
|
print(f"File {zip_path_p} non trovato. Assicurati di caricarlo nello Space.") |
|
|
|
|
|
# Embedding model used to encode queries at search time; it must be the same
# model the persisted indexes were built with. LaBSE is a multilingual
# sentence-transformers model (queries arrive in Italian/English here).
embedding_model = HuggingFaceEmbeddings(model_name="sentence-transformers/LaBSE")

# Load the two FAISS indexes extracted above (manual chunks and known issues).
# NOTE(review): allow_dangerous_deserialization=True unpickles index metadata —
# safe only because these archives are bundled with the Space, never
# user-supplied; do not point these loads at untrusted files.
manual_vectorstore = FAISS.load_local(faiss_manual_index, embedding_model, allow_dangerous_deserialization=True)

problems_vectorstore = FAISS.load_local(faiss_problems_index, embedding_model, allow_dangerous_deserialization=True)
|
|
|
|
|
def search_and_answer(query, k_manual=5, k_problems=1):
    """Retrieve context for *query* from both indexes and generate an answer.

    Args:
        query: free-text question typed by the operator.
        k_manual: number of manual chunks to retrieve (default 5, unchanged).
        k_problems: number of known-issue chunks to retrieve (default 1, unchanged).

    Returns:
        Tuple of (manual_output, problems_output, response) — the two raw
        retrieved texts plus the GPT-generated answer, matching the three
        Gradio output boxes.
    """
    # Top matches from the machine manual, joined into one context string.
    manual_results = manual_vectorstore.similarity_search(query, k=k_manual)
    manual_output = "\n\n".join(doc.page_content for doc in manual_results)

    # Closest known-issue entry (only one by default).
    problems_results = problems_vectorstore.similarity_search(query, k=k_problems)
    problems_output = "\n\n".join(doc.page_content for doc in problems_results)

    # Feed both retrieval results to the LLM as grounding context.
    combined_text = f"Manual Results: {manual_output}\n\nProblems Results: {problems_output}"
    response = rispondi_a_domanda(query, combined_text)

    return manual_output, problems_output, response
|
|
|
|
|
def rispondi_a_domanda(domanda, testo, max_token_risposta=550):
    """Answer *domanda* in Italian with GPT-3.5, grounded on *testo*.

    Args:
        domanda: the operator's question.
        testo: retrieved context (manual + known issues) to ground the answer.
        max_token_risposta: cap on the completion length (default 550).

    Returns:
        The model's answer, or a fixed Italian error string if the API call
        fails for any reason (network, auth, quota, ...).
    """
    try:
        risposta = client.chat.completions.create(
            model="gpt-3.5-turbo",
            messages=[
                # Fixed: the original concatenated these fragments without
                # separators, producing fused words in the prompt
                # ("taglio.Rispondi", "operatoreControlla").
                {"role": "system", "content": (
                    "Sei un esperto tecnico specializzato in macchine tessili da taglio. "
                    "Rispondi in italiano alla domanda in modo chiaro. "
                    "Usa un tono professionale, ma accessibile per farsi capire da ogni operatore. "
                    "Controlla se il testo contiene informazioni sufficienti per rispondere, in caso contrario aiuta l'operatore a riformulare la domanda."
                )},
                {"role": "user", "content": (
                    f"Domanda: {domanda}\n"
                    f"Testo: {testo}\n"
                    "Rispondi in modo chiaro e operativo per un tecnico che deve svolgere la mansione."
                )}
            ],
            max_tokens=max_token_risposta,
            temperature=0.5,
        )
        return risposta.choices[0].message.content
    # Broad catch is deliberate: this is the UI boundary and must always
    # return a displayable string instead of crashing the Gradio handler.
    except Exception as e:
        print(f"Si è verificato un errore: {e}")
        return "Errore nell'elaborazione della risposta."
|
|
|
|
|
# Canned questions shown under the input box as clickable examples.
examples = [
    ["How to change the knife?"],
    ["What are the safety precautions for using the machine?"],
    ["Who to contact for assistance in case of problems ?"],
]

# One multi-line text input; three text outputs mirroring the tuple
# returned by search_and_answer.
question_box = gr.Textbox(lines=2, placeholder="Enter your question here...")
answer_boxes = [
    gr.Textbox(label="Manual Results"),
    gr.Textbox(label="Issues Results"),
    gr.Textbox(label="GPT-Generated Answer"),
]

iface = gr.Interface(
    fn=search_and_answer,
    inputs=question_box,
    outputs=answer_boxes,
    examples=examples,
    title="Manual Querying System with GPT",
    description="Enter a question to get relevant information extracted from the manual and related issues, followed by a GPT-generated answer.",
)

# Start the Gradio server (blocking call).
iface.launch()
|
|
|
|