import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
import gradio as gr
import warnings
# 1. Error-resistant configuration
MODEL_NAME = "HuggingFaceH4/zephyr-7b-beta"  # Model optimized for Spaces
DEVICE = "cuda" if torch.cuda.is_available() else "cpu"
# 2. Safe model loading
try:
    tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_NAME,
        torch_dtype=torch.float16 if DEVICE == "cuda" else torch.float32,  # fp16 only on GPU
        device_map="auto",
        low_cpu_mem_usage=True,
    )
    # device_map="auto" already places the weights; calling .to(DEVICE) on an
    # accelerate-dispatched model is redundant and can raise an error.
except Exception as e:
    raise gr.Error(f"Initialization error: {str(e)}")
# 3. Improved chat function
def generate_response(message, history):
    try:
        # Free cached GPU memory between requests
        if DEVICE == "cuda":
            torch.cuda.empty_cache()
        # Format the prompt with the model's chat template
        messages = [{"role": "user", "content": message}]
        prompt = tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True,
        )
        # Generation with safe parameters
        inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
        outputs = model.generate(
            **inputs,
            max_new_tokens=256,
            temperature=0.7,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
        )
        # Decode only the newly generated tokens, skipping the prompt
        response = tokenizer.decode(
            outputs[0][inputs["input_ids"].shape[-1]:],
            skip_special_tokens=True,
        )
        return response.strip()
    except Exception as e:
        warnings.warn(str(e))
        return f"Error: {str(e)}"
# 4. Fail-safe interface
with gr.Blocks(theme=gr.themes.Soft()) as demo:
    gr.Markdown("## 🤖 Chatbot Gerardo - Stable Version")
    chatbot = gr.ChatInterface(
        fn=generate_response,
        examples=["Hola", "¿Cómo estás?"],
        title="Custom Chatbot",
        description="AI assistant created by Gerardo",
        cache_examples=False,
    )
if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)