import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
from logic import get_author_by_title, get_year_by_title, get_summary_by_title, find_book_by_description

# Load the fine-tuned model; the tokenizer is taken from the GPT-2 base
# checkpoint, since fine-tuning left the vocabulary unchanged
model_name = "LuisMBA/sci_fi_books_chat_100_ex"
tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained(model_name)
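
# Optional sketch (not part of the original Space, which runs on CPU): move
# the model to a GPU when one is available and switch to inference mode. If
# enabled, the tensors returned by the tokenizer in chatbot_response would
# also need to be moved to the same device.
# import torch
# device = "cuda" if torch.cuda.is_available() else "cpu"
# model = model.to(device).eval()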

def chatbot_response(prompt):
    # Rule-based intent detection (currently disabled): answering these
    # queries directly from the logic.py lookups is more reliable than
    # free-form generation. The "book about"/"recommend" check must come
    # before the generic "about"/"summary" check, which would otherwise
    # shadow it.
    '''
    if "author of" in prompt.lower():
        title = prompt.split("of")[-1].strip()
        return get_author_by_title(title)
    elif "book about" in prompt.lower() or "recommend a book" in prompt.lower():
        description = prompt.split("about")[-1].strip()
        return find_book_by_description(description)
    elif "year" in prompt.lower():
        title = prompt.split("book")[-1].strip()
        return get_year_by_title(title)
    elif "about" in prompt.lower() or "summary" in prompt.lower():
        title = prompt.split("book")[-1].strip()
        return get_summary_by_title(title)
    '''

    # Default: generate an answer with the fine-tuned model
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(
        **inputs,
        max_new_tokens=100,
        pad_token_id=tokenizer.eos_token_id,  # GPT-2 defines no pad token
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# Gradio interface
iface = gr.Interface(
    fn=chatbot_response,
    inputs=gr.Textbox(label="Ask about books"),
    outputs=gr.Textbox(label="Response"),
    title="Book Q&A Chatbot",
)

if __name__ == "__main__":
    iface.launch()
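
# Once the Space is live, the same endpoint can also be queried
# programmatically. A minimal sketch, assuming the Space id matches the model
# repo name and the default gr.Interface api_name "/predict"; the sample
# prompt is illustrative.
# from gradio_client import Client
# client = Client("LuisMBA/sci_fi_books_chat_100_ex")
# print(client.predict("Who is the author of Dune?", api_name="/predict"))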