import gradio as gr
from transformers import pipeline
import torch

# Load the generation pipeline with the model.
# BlenderBot is an encoder-decoder (seq2seq) model, so it must be loaded with the
# "text2text-generation" task; the causal "text-generation" pipeline cannot load it.
chatbot = pipeline(
    "text2text-generation",
    model="facebook/blenderbot-400M-distill",
    device=0 if torch.cuda.is_available() else -1
)

def chat(user_input):
    try:
        # Generate a response to the user's message
        response = chatbot(user_input, max_length=100, do_sample=True)
        # Return the generated text
        return response[0]['generated_text']
    except Exception as e:
        return f"An error occurred: {str(e)}"

# Create the Gradio interface
iface = gr.Interface(
    fn=chat,
    inputs=gr.Textbox(lines=2, placeholder="Type your message here..."),
    outputs="text",
    title="Chatbot using Blenderbot",
    description="A simple chat application using the Blenderbot model."
)

if __name__ == "__main__":
    iface.launch()
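
# Optional smoke test (a sketch, not part of the app): once the app is running,
# you can query the Interface's default "/predict" endpoint programmatically with
# the gradio_client package. Assumptions: the server is on the default local port
# 7860 and gradio_client is installed (`pip install gradio_client`).
#
#   from gradio_client import Client
#
#   client = Client("http://127.0.0.1:7860")
#   reply = client.predict("Hello there!", api_name="/predict")
#   print(reply)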