Spaces:
Sleeping
import gradio as gr
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

# Hugging Face Hub repo id for the fine-tuned FLAN-T5 question generator.
# Hoisted into a single constant so the tokenizer and model can never
# drift onto different checkpoints (the id was previously duplicated).
MODEL_ID = "memorease/memorease-flan-t5"

# Load once at startup; both objects are module-level singletons reused
# by generate_question for every request.
model = AutoModelForSeq2SeqLM.from_pretrained(MODEL_ID)
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
def generate_question(description):
    """Return one factual, relevant question derived from *description*.

    Encodes a fixed instruction prompt plus the user's memory text with the
    module-level ``tokenizer``, generates up to 32 new tokens with the
    module-level ``model``, and decodes the first sequence to plain text
    (special tokens stripped).
    """
    prompt = (
        "Only generate a factual and relevant question about this memory: "
        f"{description}"
    )
    encoded = tokenizer(prompt, return_tensors="pt", padding=True, truncation=True)
    token_ids = model.generate(**encoded, max_new_tokens=32)
    return tokenizer.decode(token_ids[0], skip_special_tokens=True)
# Wire the generator into a minimal Gradio UI: one text box in, one out.
demo = gr.Interface(
    generate_question,
    inputs=gr.Textbox(label="Memory Description"),
    outputs=gr.Textbox(label="Generated Question"),
    allow_flagging="never",  # optional: hide the user flagging button
    live=False,  # no need for live inference; run only on submit
)

# Bind to all interfaces on the standard Spaces port.
demo.launch(server_name="0.0.0.0", server_port=7860)