Spaces:
Sleeping
Sleeping
File size: 999 Bytes
cc0f132 8474b82 cc0f132 8474b82 cc0f132 8474b82 886f598 8474b82 cc0f132 8474b82 cc0f132 f200e68 cc0f132 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 |
from ctransformers import AutoModelForCausalLM
import gradio as gr
# Load a 4-bit (Q4_K_M) GGUF quantization of Zephyr-7B-beta via ctransformers.
# The file is fetched from the Hugging Face Hub on first run and cached locally.
model = AutoModelForCausalLM.from_pretrained(
    "TheBloke/zephyr-7B-beta-GGUF",
    model_file="zephyr-7b-beta.Q4_K_M.gguf",
    model_type="mistral",  # Zephyr is a Mistral-architecture fine-tune
    max_new_tokens=512,    # cap on generated tokens per reply
    temperature=0.7,       # moderate sampling randomness
    top_p=0.9              # nucleus sampling threshold
)
# System prompt (Spanish): casts the model as an empathetic mentor for
# employee workplace wellbeing. NOTE(review): the prompt text ends with a
# "(Sigue con el prompt completo)" placeholder — it appears truncated here;
# confirm the full prompt is substituted before deploying.
SYSTEM_PROMPT = """Eres una mentora empática y reflexiva, especializada en acompañar a empleados en su camino hacia el bienestar laboral. Las personas acudirán a ti para compartir inquietudes, dudas, bloqueos personales o logros importantes...
(Sigue con el prompt completo)
"""
def chat_fn(message, history):
    """Build a Zephyr-format prompt from the chat history and generate a reply.

    Args:
        message: Latest user message (str).
        history: Prior turns. Because the UI is created with
            ``gr.ChatInterface(type="messages")``, this is a list of
            ``{"role": ..., "content": ...}`` dicts; legacy ``(user, bot)``
            tuple pairs are also accepted for backward compatibility.

    Returns:
        The model's generated reply with surrounding whitespace stripped.
    """
    # Zephyr's chat template uses <|system|> / <|user|> / <|assistant|> markers.
    full_prompt = f"<|system|>\n{SYSTEM_PROMPT}\n"
    for turn in history:
        if isinstance(turn, dict):
            # Messages format: one dict per turn. This fixes a crash — the
            # original unpacked `user, bot` pairs, which fails when the
            # interface passes role/content dicts (type="messages").
            role = "assistant" if turn.get("role") == "assistant" else "user"
            full_prompt += f"<|{role}|>\n{turn.get('content', '')}\n"
        else:
            # Legacy tuple format: (user_message, bot_reply).
            user, bot = turn
            full_prompt += f"<|user|>\n{user}\n<|assistant|>\n{bot}\n"
    full_prompt += f"<|user|>\n{message}\n<|assistant|>\n"
    output = model(full_prompt)  # blocking generation call
    return output.strip()
# Chat web UI. type="messages" means the `history` argument passed to chat_fn
# is a list of {"role": ..., "content": ...} dicts (OpenAI-style), not
# (user, bot) tuple pairs.
demo = gr.ChatInterface(fn=chat_fn, title="Mentora Mely", theme="soft", type="messages")

# Start the Gradio server only when executed as a script (not on import).
if __name__ == "__main__":
    demo.launch()
|